From noreply at buildbot.pypy.org Fri Nov 1 09:29:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 09:29:21 +0100 (CET) Subject: [pypy-commit] pypy jit-counter: Retry to tweak these numbers Message-ID: <20131101082921.E71981C0112@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-counter Changeset: r67790:ff892c9f2251 Date: 2013-11-01 09:28 +0100 http://bitbucket.org/pypy/pypy/changeset/ff892c9f2251/ Log: Retry to tweak these numbers diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -9,7 +9,7 @@ class JitCounter: - DEFAULT_SIZE = 8192 + DEFAULT_SIZE = 4096 def __init__(self, size=DEFAULT_SIZE, translator=None): "NOT_RPYTHON" @@ -29,11 +29,11 @@ step = 0 glob = Glob() def invoke_after_minor_collection(): - # After 64 minor collections, we call decay_all_counters(). + # After 32 minor collections, we call decay_all_counters(). # The "--jit decay=N" option measures the amount the # counters are then reduced by. 
glob.step += 1 - if glob.step == 64: + if glob.step == 32: glob.step = 0 self.decay_all_counters() if not hasattr(translator, '_jit2gc'): From noreply at buildbot.pypy.org Fri Nov 1 14:57:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 14:57:51 +0100 (CET) Subject: [pypy-commit] pypy py3k: issue #1628: fix iter(iter(array.array())) Message-ID: <20131101135751.A8D831C0330@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3k Changeset: r67791:b6b883180ed9 Date: 2013-11-01 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/b6b883180ed9/ Log: issue #1628: fix iter(iter(array.array())) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -645,6 +645,9 @@ def __init__(self, array): self.index = 0 self.array = array + + def iter_w(self, space): + return space.wrap(self) def next_w(self, space): if self.index < self.array.len: @@ -655,6 +658,7 @@ ArrayIterator.typedef = TypeDef( 'arrayiterator', + __iter__ = interp2app(ArrayIterator.iter_w), __next__ = interp2app(ArrayIterator.next_w), ) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1154,3 +1154,9 @@ b = array_reconstructor( array.array, 'u', mformat_code, teststr.encode(encoding)) assert a == b + + def test_iterate_iterator(self): + import array + it = iter(array.array('b')) + assert list(it) == [] + assert list(iter(it)) == [] From noreply at buildbot.pypy.org Fri Nov 1 15:17:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 15:17:44 +0100 (CET) Subject: [pypy-commit] pypy jit-counter: close branch Message-ID: <20131101141744.838101C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: jit-counter Changeset: r67792:c4ea929d3f70 Date: 2013-11-01 15:10 +0100 http://bitbucket.org/pypy/pypy/changeset/c4ea929d3f70/ Log: 
close branch From noreply at buildbot.pypy.org Fri Nov 1 15:17:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 15:17:46 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge jit-counter Message-ID: <20131101141746.7F8961C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67793:826a087460c9 Date: 2013-11-01 15:16 +0100 http://bitbucket.org/pypy/pypy/changeset/826a087460c9/ Log: hg merge jit-counter Tweak the jit counters: decay them at minor collection (actually only every 32 minor collection is enough). Should avoid the "memory leaks" observed in long-running processes, actually created by the jit compiling more and more rarely executed paths. Done with refactorings to move them to a single table; should reduce total memory usage. Clean-up, particularly for guard_value. diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -32,22 +32,6 @@ name = opcode_method_names[ord(bytecode.co_code[next_instr])] return '%s #%d %s' % (bytecode.get_repr(), next_instr, name) -def make_greenkey_dict_key(next_instr, is_being_profiled): - # use only uints as keys in the jit_cells dict, rather than - # a tuple (next_instr, is_being_profiled) - return ( - (next_instr << 1) | - r_uint(intmask(is_being_profiled)) - ) - -def get_jitcell_at(next_instr, is_being_profiled, bytecode): - key = make_greenkey_dict_key(next_instr, is_being_profiled) - return bytecode.jit_cells.get(key, None) - -def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): - key = make_greenkey_dict_key(next_instr, is_being_profiled) - bytecode.jit_cells[key] = newcell - def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 @@ -58,8 +42,6 @@ virtualizables = ['frame'] pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, - get_jitcell_at = get_jitcell_at, 
- set_jitcell_at = set_jitcell_at, should_unroll_one_iteration = should_unroll_one_iteration, name='pypyjit') @@ -121,18 +103,6 @@ return intmask(decr_by) -PyCode__initialize = PyCode._initialize - -class __extend__(PyCode): - __metaclass__ = extendabletype - - def _initialize(self): - PyCode__initialize(self) - self.jit_cells = {} - - def _cleanup_(self): - self.jit_cells = {} - # ____________________________________________________________ # # Public interface diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -369,7 +369,9 @@ translator = self.translator self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + if not hasattr(translator, '_jit2gc'): + translator._jit2gc = {} + translator._jit2gc['layoutbuilder'] = self.layoutbuilder def _setup_gcclass(self): from rpython.memory.gcheader import GCHeaderBuilder diff --git a/rpython/jit/codewriter/longlong.py b/rpython/jit/codewriter/longlong.py --- a/rpython/jit/codewriter/longlong.py +++ b/rpython/jit/codewriter/longlong.py @@ -25,6 +25,7 @@ getfloatstorage = lambda x: x getrealfloat = lambda x: x gethash = compute_hash + gethash_fast = longlong2float.float2longlong is_longlong = lambda TYPE: False # ------------------------------------- @@ -40,6 +41,7 @@ getfloatstorage = longlong2float.float2longlong getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) + gethash_fast = gethash is_longlong = lambda TYPE: (TYPE is lltype.SignedLongLong or TYPE is lltype.UnsignedLongLong) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1,8 +1,9 @@ import weakref -from rpython.rtyper.lltypesystem import lltype +from 
rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_start, debug_stop, debug_print +from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib import rstack from rpython.rlib.jit import JitDebugInfo, Counters, dont_look_inside from rpython.conftest import option @@ -483,11 +484,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # on a GUARD_VALUE, there is one counter per value; - _counters = None # they get stored in _counters then. - # this class also gets the following attributes stored by resume.py code - # XXX move all of unused stuff to guard_op, now that we can have # a separate class, so it does not survive that long rd_snapshot = None @@ -498,18 +495,26 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_BASE_MASK = 0x0FFFFFFF # the base counter value - CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard - CNT_TYPE_MASK = 0x60000000 # mask for the type + status = r_uint(0) - CNT_INT = 0x20000000 - CNT_REF = 0x40000000 - CNT_FLOAT = 0x60000000 + ST_BUSY_FLAG = 0x01 # if set, busy tracing from the guard + ST_TYPE_MASK = 0x06 # mask for the type (TY_xxx) + ST_SHIFT = 3 # in "status >> ST_SHIFT" is stored: + # - if TY_NONE, the jitcounter index directly + # - otherwise, the guard_value failarg index + TY_NONE = 0x00 + TY_INT = 0x02 + TY_REF = 0x04 + TY_FLOAT = 0x06 - def store_final_boxes(self, guard_op, boxes): + def store_final_boxes(self, guard_op, boxes, metainterp_sd): guard_op.setfailargs(boxes) self.rd_count = len(boxes) self.guard_opnum = guard_op.getopnum() + # + if metainterp_sd.warmrunnerdesc is not None: # for tests + jitcounter = metainterp_sd.warmrunnerdesc.jitcounter + self.status = jitcounter.fetch_next_index() << self.ST_SHIFT def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE 
@@ -519,18 +524,15 @@ except ValueError: return # xxx probably very rare else: - if i > self.CNT_BASE_MASK: - return # probably never, but better safe than sorry if box.type == history.INT: - cnt = self.CNT_INT + ty = self.TY_INT elif box.type == history.REF: - cnt = self.CNT_REF + ty = self.TY_REF elif box.type == history.FLOAT: - cnt = self.CNT_FLOAT + ty = self.TY_FLOAT else: assert 0, box.type - assert cnt > self.CNT_BASE_MASK - self._counter = cnt | i + self.status = ty | (r_uint(i) << self.ST_SHIFT) def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): if self.must_compile(deadframe, metainterp_sd, jitdriver_sd): @@ -557,65 +559,60 @@ _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, deadframe, metainterp_sd, jitdriver_sd): - trace_eagerness = jitdriver_sd.warmstate.trace_eagerness + jitcounter = metainterp_sd.warmrunnerdesc.jitcounter # - if self._counter <= self.CNT_BASE_MASK: - # simple case: just counting from 0 to trace_eagerness - self._counter += 1 - return self._counter >= trace_eagerness + if self.status & (self.ST_BUSY_FLAG | self.ST_TYPE_MASK) == 0: + # common case: this is not a guard_value, and we are not + # already busy tracing. The rest of self.status stores a + # valid per-guard index in the jitcounter. + index = self.status >> self.ST_SHIFT # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. - elif self._counter & self.CNT_BUSY_FLAG: + elif self.status & self.ST_BUSY_FLAG: return False # - else: # we have a GUARD_VALUE that fails. Make a _counters instance - # (only now, when the guard is actually failing at least once), - # and use it to record some statistics about the failing values. 
- index = self._counter & self.CNT_BASE_MASK - typetag = self._counter & self.CNT_TYPE_MASK - counters = self._counters - if typetag == self.CNT_INT: - intvalue = metainterp_sd.cpu.get_int_value( - deadframe, index) - if counters is None: - self._counters = counters = ResumeGuardCountersInt() - else: - assert isinstance(counters, ResumeGuardCountersInt) - counter = counters.see_int(intvalue) - elif typetag == self.CNT_REF: - refvalue = metainterp_sd.cpu.get_ref_value( - deadframe, index) - if counters is None: - self._counters = counters = ResumeGuardCountersRef() - else: - assert isinstance(counters, ResumeGuardCountersRef) - counter = counters.see_ref(refvalue) - elif typetag == self.CNT_FLOAT: - floatvalue = metainterp_sd.cpu.get_float_value( - deadframe, index) - if counters is None: - self._counters = counters = ResumeGuardCountersFloat() - else: - assert isinstance(counters, ResumeGuardCountersFloat) - counter = counters.see_float(floatvalue) + else: # we have a GUARD_VALUE that fails. 
+ from rpython.rlib.objectmodel import current_object_addr_as_int + + index = intmask(self.status >> self.ST_SHIFT) + typetag = intmask(self.status & self.ST_TYPE_MASK) + + # fetch the actual value of the guard_value, possibly turning + # it to an integer + if typetag == self.TY_INT: + intval = metainterp_sd.cpu.get_int_value(deadframe, index) + elif typetag == self.TY_REF: + refval = metainterp_sd.cpu.get_ref_value(deadframe, index) + intval = lltype.cast_ptr_to_int(refval) + elif typetag == self.TY_FLOAT: + floatval = metainterp_sd.cpu.get_float_value(deadframe, index) + intval = longlong.gethash_fast(floatval) else: assert 0, typetag - return counter >= trace_eagerness + + if not we_are_translated(): + if isinstance(intval, llmemory.AddressAsInt): + intval = llmemory.cast_adr_to_int( + llmemory.cast_int_to_adr(intval), "forced") + + hash = (current_object_addr_as_int(self) * 777767777 + + intval * 1442968193) + index = jitcounter.get_index(hash) + # + increment = jitdriver_sd.warmstate.increment_trace_eagerness + return jitcounter.tick(index, increment) def start_compiling(self): # start tracing and compiling from this guard. - self._counter |= self.CNT_BUSY_FLAG + self.status |= self.ST_BUSY_FLAG def done_compiling(self): - # done tracing and compiling from this guard. Either the bridge has - # been successfully compiled, in which case whatever value we store - # in self._counter will not be seen any more, or not, in which case - # we should reset the counter to 0, in order to wait a bit until the - # next attempt. - if self._counter >= 0: - self._counter = 0 - self._counters = None + # done tracing and compiling from this guard. Note that if the + # bridge has not been successfully compiled, the jitcounter for + # it was reset to 0 already by jitcounter.tick() and not + # incremented at all as long as ST_BUSY_FLAG was set. + self.status &= ~self.ST_BUSY_FLAG def compile_and_attach(self, metainterp, new_loop): # We managed to create a bridge. 
Attach the new operations @@ -745,69 +742,6 @@ return res -class AbstractResumeGuardCounters(object): - # Completely custom algorithm for now: keep 5 pairs (value, counter), - # and when we need more, we discard the middle pair (middle in the - # current value of the counter). That way, we tend to keep the - # values with a high counter, but also we avoid always throwing away - # the most recently added value. **THIS ALGO MUST GO AWAY AT SOME POINT** - pass - -def _see(self, newvalue): - # find and update an existing counter - unused = -1 - for i in range(5): - cnt = self.counters[i] - if cnt: - if self.values[i] == newvalue: - cnt += 1 - self.counters[i] = cnt - return cnt - else: - unused = i - # not found. Use a previously unused entry, if there is one - if unused >= 0: - self.counters[unused] = 1 - self.values[unused] = newvalue - return 1 - # no unused entry. Overwrite the middle one. Computed with indices - # a, b, c meaning the highest, second highest, and third highest - # entries. 
- a = 0 - b = c = -1 - for i in range(1, 5): - if self.counters[i] > self.counters[a]: - c = b - b = a - a = i - elif b < 0 or self.counters[i] > self.counters[b]: - c = b - b = i - elif c < 0 or self.counters[i] > self.counters[c]: - c = i - self.counters[c] = 1 - self.values[c] = newvalue - return 1 - -class ResumeGuardCountersInt(AbstractResumeGuardCounters): - def __init__(self): - self.counters = [0] * 5 - self.values = [0] * 5 - see_int = func_with_new_name(_see, 'see_int') - -class ResumeGuardCountersRef(AbstractResumeGuardCounters): - def __init__(self): - self.counters = [0] * 5 - self.values = [history.ConstPtr.value] * 5 - see_ref = func_with_new_name(_see, 'see_ref') - -class ResumeGuardCountersFloat(AbstractResumeGuardCounters): - def __init__(self): - self.counters = [0] * 5 - self.values = [longlong.ZEROF] * 5 - see_float = func_with_new_name(_see, 'see_float') - - class ResumeFromInterpDescr(ResumeDescr): def __init__(self, original_greenkey): self.original_greenkey = original_greenkey diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/counter.py @@ -0,0 +1,153 @@ +from rpython.rlib.rarithmetic import r_singlefloat, r_uint +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + + +r_uint32 = rffi.r_uint +assert r_uint32.BITS == 32 +UINT32MAX = 2 ** 32 - 1 + + +class JitCounter: + DEFAULT_SIZE = 4096 + + def __init__(self, size=DEFAULT_SIZE, translator=None): + "NOT_RPYTHON" + self.size = size + self.shift = 1 + while (UINT32MAX >> self.shift) != size - 1: + self.shift += 1 + assert self.shift < 999, "size is not a power of two <= 2**31" + self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), size, + flavor='raw', zero=True, + track_allocation=False) + self.celltable = [None] * size + self._nextindex = r_uint(0) + # + if translator is not None: + class Glob: + step = 0 + glob = Glob() + 
def invoke_after_minor_collection(): + # After 32 minor collections, we call decay_all_counters(). + # The "--jit decay=N" option measures the amount the + # counters are then reduced by. + glob.step += 1 + if glob.step == 32: + glob.step = 0 + self.decay_all_counters() + if not hasattr(translator, '_jit2gc'): + translator._jit2gc = {} + translator._jit2gc['invoke_after_minor_collection'] = ( + invoke_after_minor_collection) + + def compute_threshold(self, threshold): + """Return the 'increment' value corresponding to the given number.""" + if threshold <= 0: + return 0.0 # no increment, never reach 1.0 + return 1.0 / (threshold - 0.001) + + def get_index(self, hash): + """Return the index (< self.size) from a hash value. This truncates + the hash to 32 bits, and then keep the *highest* remaining bits. + Be sure that hash is computed correctly.""" + hash32 = r_uint(r_uint32(hash)) # mask off the bits higher than 32 + index = hash32 >> self.shift # shift, resulting in a value < size + return index # return the result as a r_uint + get_index._always_inline_ = True + + def fetch_next_index(self): + result = self._nextindex + self._nextindex = (result + 1) & self.get_index(-1) + return result + + def tick(self, index, increment): + counter = float(self.timetable[index]) + increment + if counter < 1.0: + self.timetable[index] = r_singlefloat(counter) + return False + else: + # when the bound is reached, we immediately reset the value to 0.0 + self.reset(index) + return True + tick._always_inline_ = True + + def reset(self, index): + self.timetable[index] = r_singlefloat(0.0) + + def lookup_chain(self, index): + return self.celltable[index] + + def cleanup_chain(self, index): + self.reset(index) + self.install_new_cell(index, None) + + def install_new_cell(self, index, newcell): + cell = self.celltable[index] + keep = newcell + while cell is not None: + nextcell = cell.next + if not cell.should_remove_jitcell(): + cell.next = keep + keep = cell + cell = nextcell + 
self.celltable[index] = keep + + def set_decay(self, decay): + """Set the decay, from 0 (none) to 1000 (max).""" + if decay < 0: + decay = 0 + elif decay > 1000: + decay = 1000 + self.decay_by_mult = 1.0 - (decay * 0.001) + + def decay_all_counters(self): + # Called during a minor collection by the GC, to gradually decay + # counters that didn't reach their maximum. Thus if a counter + # is incremented very slowly, it will never reach the maximum. + # This avoids altogether the JIT compilation of rare paths. + # We also call this function when any maximum bound is reached, + # to avoid sudden bursts of JIT-compilation (the next one will + # not reach the maximum bound immmediately after). This is + # important in corner cases where we would suddenly compile more + # than one loop because all counters reach the bound at the same + # time, but where compiling all but the first one is pointless. + size = self.size + pypy__decay_jit_counters(self.timetable, self.decay_by_mult, size) + + +# this function is written directly in C; gcc will optimize it using SSE +eci = ExternalCompilationInfo(post_include_bits=[""" +static void pypy__decay_jit_counters(float table[], double f1, long size1) { + float f = (float)f1; + int i, size = (int)size1; + for (i=0; i> (32 - 7)) + +def test_fetch_next_index(): + jc = JitCounter(size=4) + lst = [jc.fetch_next_index() for i in range(10)] + assert lst == [0, 1, 2, 3, 0, 1, 2, 3, 0, 1] + +def test_tick(): + jc = JitCounter() + incr = jc.compute_threshold(4) + for i in range(5): + r = jc.tick(104, incr) + assert r is (i == 3) + for i in range(5): + r = jc.tick(108, incr) + s = jc.tick(109, incr) + assert r is (i == 3) + assert s is (i == 3) + jc.reset(108) + for i in range(5): + r = jc.tick(108, incr) + assert r is (i == 3) + +def test_install_new_chain(): + class Dead: + next = None + def should_remove_jitcell(self): + return True + class Alive: + next = None + def should_remove_jitcell(self): + return False + # + jc = JitCounter() + 
assert jc.lookup_chain(104) is None + d1 = Dead() + jc.install_new_cell(104, d1) + assert jc.lookup_chain(104) is d1 + d2 = Dead() + jc.install_new_cell(104, d2) + assert jc.lookup_chain(104) is d2 + assert d2.next is None + # + d3 = Alive() + jc.install_new_cell(104, d3) + assert jc.lookup_chain(104) is d3 + assert d3.next is None + d4 = Alive() + jc.install_new_cell(104, d4) + assert jc.lookup_chain(104) is d3 + assert d3.next is d4 + assert d4.next is None diff --git a/rpython/jit/metainterp/test/test_memmgr.py b/rpython/jit/metainterp/test/test_memmgr.py --- a/rpython/jit/metainterp/test/test_memmgr.py +++ b/rpython/jit/metainterp/test/test_memmgr.py @@ -15,7 +15,7 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.jit.metainterp.warmspot import get_stats -from rpython.jit.metainterp.warmstate import JitCell +from rpython.jit.metainterp.warmstate import BaseJitCell from rpython.rlib import rgc class FakeLoopToken: @@ -87,15 +87,15 @@ # these tests to pass. But we dont want it there always since that will # make all other tests take forever. 
def setup_class(cls): - original_get_procedure_token = JitCell.get_procedure_token + original_get_procedure_token = BaseJitCell.get_procedure_token def get_procedure_token(self): rgc.collect(); return original_get_procedure_token(self) - JitCell.get_procedure_token = get_procedure_token + BaseJitCell.get_procedure_token = get_procedure_token cls.original_get_procedure_token = original_get_procedure_token def teardown_class(cls): - JitCell.get_procedure_token = cls.original_get_procedure_token + BaseJitCell.get_procedure_token = cls.original_get_procedure_token def test_loop_kept_alive(self): myjitdriver = JitDriver(greens=[], reds=['n']) diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -342,7 +342,7 @@ assert res == 0 self.check_max_trace_length(TRACE_LIMIT) self.check_enter_count_at_most(10) # maybe - self.check_aborted_count(7) + self.check_aborted_count(6) def test_trace_limit_bridge(self): def recursive(n): @@ -425,7 +425,7 @@ res = self.meta_interp(loop, [20], failargs_limit=FAILARGS_LIMIT, listops=True) - self.check_aborted_count(5) + self.check_aborted_count(4) def test_max_failure_args_exc(self): FAILARGS_LIMIT = 10 @@ -465,7 +465,7 @@ res = self.meta_interp(main, [20], failargs_limit=FAILARGS_LIMIT, listops=True) assert not res - self.check_aborted_count(5) + self.check_aborted_count(4) def test_set_param_inlining(self): myjitdriver = JitDriver(greens=[], reds=['n', 'recurse']) diff --git a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py --- a/rpython/jit/metainterp/test/test_warmstate.py +++ b/rpython/jit/metainterp/test/test_warmstate.py @@ -3,9 +3,10 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.jit.metainterp.warmstate import wrap, unwrap, specialize_value from rpython.jit.metainterp.warmstate import equal_whatever, 
hash_whatever -from rpython.jit.metainterp.warmstate import WarmEnterState, JitCell +from rpython.jit.metainterp.warmstate import WarmEnterState from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr +from rpython.jit.metainterp.counter import DeterministicJitCounter from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import r_singlefloat @@ -77,69 +78,6 @@ interpret(fn, [42], type_system='lltype') -def test_make_jitcell_getter_default(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed, lltype.Float] - state = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = state._make_jitcell_getter_default() - cell1 = get_jitcell(True, 42, 42.5) - assert isinstance(cell1, JitCell) - cell2 = get_jitcell(True, 42, 42.5) - assert cell1 is cell2 - cell3 = get_jitcell(True, 41, 42.5) - assert get_jitcell(False, 42, 0.25) is None - cell4 = get_jitcell(True, 42, 0.25) - assert get_jitcell(False, 42, 0.25) is cell4 - assert cell1 is not cell3 is not cell4 is not cell1 - -def test_make_jitcell_getter(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Float] - _get_jitcell_at_ptr = None - state = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = state.make_jitcell_getter() - cell1 = get_jitcell(True, 1.75) - cell2 = get_jitcell(True, 1.75) - assert cell1 is cell2 - assert get_jitcell is state.make_jitcell_getter() - -def test_make_jitcell_getter_custom(): - from rpython.rtyper.typesystem import LowLevelTypeSystem - class FakeRTyper: - type_system = LowLevelTypeSystem.instance - celldict = {} - def getter(x, y): - return celldict.get((x, y)) - def setter(newcell, x, y): - newcell.x = x - newcell.y = y - celldict[x, y] = newcell - GETTER = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Float], - llmemory.GCREF)) - SETTER = lltype.Ptr(lltype.FuncType([llmemory.GCREF, lltype.Signed, - lltype.Float], lltype.Void)) - class FakeWarmRunnerDesc: - rtyper = 
FakeRTyper() - cpu = None - memory_manager = None - class FakeJitDriverSD: - _get_jitcell_at_ptr = llhelper(GETTER, getter) - _set_jitcell_at_ptr = llhelper(SETTER, setter) - # - state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - get_jitcell = state._make_jitcell_getter_custom() - cell1 = get_jitcell(True, 5, 42.5) - assert isinstance(cell1, JitCell) - assert cell1.x == 5 - assert cell1.y == 42.5 - cell2 = get_jitcell(True, 5, 42.5) - assert cell2 is cell1 - cell3 = get_jitcell(True, 41, 42.5) - assert get_jitcell(False, 42, 0.25) is None - cell4 = get_jitcell(True, 42, 0.25) - assert get_jitcell(False, 42, 0.25) is cell4 - assert cell1 is not cell3 is not cell4 is not cell1 - def test_make_unwrap_greenkey(): class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] @@ -149,26 +87,11 @@ assert greenargs == (42, 42.5) assert type(greenargs[0]) is int -def test_attach_unoptimized_bridge_from_interp(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed, lltype.Float] - _get_jitcell_at_ptr = None - state = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = state.make_jitcell_getter() - class FakeLoopToken(object): - invalidated = False - looptoken = FakeLoopToken() - state.attach_procedure_to_interp([ConstInt(5), - constfloat(2.25)], - looptoken) - cell1 = get_jitcell(True, 5, 2.25) - assert cell1.counter < 0 - assert cell1.get_procedure_token() is looptoken - def test_make_jitdriver_callbacks_1(): class FakeWarmRunnerDesc: cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] @@ -198,13 +121,13 @@ rtyper = None cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) _confirm_enter_jit_ptr = None _can_never_inline_ptr = None - _get_jitcell_at_ptr = 
None _should_unroll_one_iteration_ptr = None red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) @@ -224,13 +147,13 @@ rtyper = None cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = llhelper(ENTER_JIT, confirm_enter_jit) _can_never_inline_ptr = None - _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None red_args_types = [] @@ -250,13 +173,13 @@ rtyper = None cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = None _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) - _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None red_args_types = [] @@ -264,52 +187,3 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True - -def test_cleanup_jitcell_dict(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed] - # - # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell1 = get_jitcell(True, -1) - assert len(warmstate._jitcell_dict) == 1 - # - for i in range(1, 20005): - get_jitcell(True, i) # should trigger a clean-up at 20001 - assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - # - # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% - # - for i in range(0, 20005): - get_jitcell(True, i) - assert len(warmstate._jitcell_dict) == (i % 19999) + 2 - # - assert cell2 in 
warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * 0.92) # decayed once - # - # Same test, with jitcells that are compiled and freed by the memmgr - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - get_jitcell(True, -1) - # - for i in range(1, 20005): - cell = get_jitcell(True, i) - cell.counter = -1 - cell.wref_procedure_token = None # or a dead weakref, equivalently - assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - # - # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell = get_jitcell(True, -1) - cell.counter = -2 - # - for i in range(1, 20005): - cell = get_jitcell(True, i) - cell.counter = -2 - assert len(warmstate._jitcell_dict) == i + 1 diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -205,6 +205,12 @@ vrefinfo = VirtualRefInfo(self) self.codewriter.setup_vrefinfo(vrefinfo) # + from rpython.jit.metainterp import counter + if self.cpu.translate_support_code: + self.jitcounter = counter.JitCounter(translator=translator) + else: + self.jitcounter = counter.DeterministicJitCounter() + # self.hooks = policy.jithookiface self.make_virtualizable_infos() self.make_driverhook_graphs() @@ -509,21 +515,10 @@ jd._maybe_compile_and_run_fn = maybe_compile_and_run def make_driverhook_graphs(self): - from rpython.rlib.jit import BaseJitCell - bk = self.rtyper.annotator.bookkeeper - classdef = bk.getuniqueclassdef(BaseJitCell) - s_BaseJitCell_or_None = annmodel.SomeInstance(classdef, - can_be_None=True) - s_BaseJitCell_not_None = annmodel.SomeInstance(classdef) s_Str = annmodel.SomeString() # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) for jd in self.jitdrivers_sd: - jd._set_jitcell_at_ptr = self._make_hook_graph(jd, - annhelper, 
jd.jitdriver.set_jitcell_at, annmodel.s_None, - s_BaseJitCell_not_None) - jd._get_jitcell_at_ptr = self._make_hook_graph(jd, - annhelper, jd.jitdriver.get_jitcell_at, s_BaseJitCell_or_None) jd._get_printable_location_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_printable_location, s_Str) jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -4,7 +4,7 @@ from rpython.jit.codewriter import support, heaptracker, longlong from rpython.jit.metainterp import history from rpython.rlib.debug import debug_start, debug_stop, debug_print -from rpython.rlib.jit import PARAMETERS, BaseJitCell +from rpython.rlib.jit import PARAMETERS from rpython.rlib.nonconst import NonConstant from rpython.rlib.objectmodel import specialize, we_are_translated, r_dict from rpython.rlib.rarithmetic import intmask @@ -124,15 +124,14 @@ return rffi.cast(lltype.Signed, x) -class JitCell(BaseJitCell): - # the counter can mean the following things: - # counter >= 0: not yet traced, wait till threshold is reached - # counter == -1: there is an entry bridge for this cell - # counter == -2: tracing is currently going on for this cell - counter = 0 - dont_trace_here = False - extra_delay = chr(0) +JC_TRACING = 0x01 +JC_DONT_TRACE_HERE = 0x02 +JC_TEMPORARY = 0x04 + +class BaseJitCell(object): + flags = 0 # JC_xxx flags wref_procedure_token = None + next = None def get_procedure_token(self): if self.wref_procedure_token is not None: @@ -141,18 +140,28 @@ return token return None - def set_procedure_token(self, token): + def set_procedure_token(self, token, tmp=False): self.wref_procedure_token = self._makeref(token) + if tmp: + self.flags |= JC_TEMPORARY + else: + self.flags &= ~JC_TEMPORARY def _makeref(self, token): assert token is not None return weakref.ref(token) + def should_remove_jitcell(self): + if 
self.get_procedure_token() is not None: + return False # don't remove JitCells with a procedure_token + # don't remove JitCells that are being traced, or JitCells with + # the "don't trace here" flag. Other JitCells can be removed. + return (self.flags & (JC_TRACING | JC_DONT_TRACE_HERE)) == 0 + # ____________________________________________________________ class WarmEnterState(object): - THRESHOLD_LIMIT = sys.maxint // 2 def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -166,17 +175,13 @@ self.profiler = None # initialize the state with the default values of the # parameters specified in rlib/jit.py - for name, default_value in PARAMETERS.items(): - meth = getattr(self, 'set_param_' + name) - meth(default_value) + if self.warmrunnerdesc is not None: + for name, default_value in PARAMETERS.items(): + meth = getattr(self, 'set_param_' + name) + meth(default_value) def _compute_threshold(self, threshold): - if threshold <= 0: - return 0 # never reach the THRESHOLD_LIMIT - if threshold < 2: - threshold = 2 - return (self.THRESHOLD_LIMIT // threshold) + 1 - # the number is at least 1, and at most about half THRESHOLD_LIMIT + return self.warmrunnerdesc.jitcounter.compute_threshold(threshold) def set_param_threshold(self, threshold): self.increment_threshold = self._compute_threshold(threshold) @@ -185,11 +190,14 @@ self.increment_function_threshold = self._compute_threshold(threshold) def set_param_trace_eagerness(self, value): - self.trace_eagerness = value + self.increment_trace_eagerness = self._compute_threshold(value) def set_param_trace_limit(self, value): self.trace_limit = value + def set_param_decay(self, decay): + self.warmrunnerdesc.jitcounter.set_decay(decay) + def set_param_inlining(self, value): self.inlining = value @@ -230,18 +238,17 @@ self.warmrunnerdesc.memory_manager.max_unroll_loops = value def disable_noninlinable_function(self, greenkey): - cell = self.jit_cell_at_key(greenkey) - cell.dont_trace_here = True + cell = 
self.JitCell.ensure_jit_cell_at_key(greenkey) + cell.flags |= JC_DONT_TRACE_HERE debug_start("jit-disableinlining") loc = self.get_location_str(greenkey) debug_print("disabled inlining", loc) debug_stop("jit-disableinlining") def attach_procedure_to_interp(self, greenkey, procedure_token): - cell = self.jit_cell_at_key(greenkey) + cell = self.JitCell.ensure_jit_cell_at_key(greenkey) old_token = cell.get_procedure_token() cell.set_procedure_token(procedure_token) - cell.counter = -1 # valid procedure bridge attached if old_token is not None: self.cpu.redirect_call_assembler(old_token, procedure_token) # procedure_token is also kept alive by any loop that used @@ -262,7 +269,7 @@ vinfo = jitdriver_sd.virtualizable_info index_of_virtualizable = jitdriver_sd.index_of_virtualizable num_green_args = jitdriver_sd.num_green_args - get_jitcell = self.make_jitcell_getter() + JitCell = self.make_jitcell_subclass() self.make_jitdriver_callbacks() confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( @@ -280,6 +287,7 @@ assert 0, kind func_execute_token = self.cpu.make_execute_token(*ARGS) cpu = self.cpu + jitcounter = self.warmrunnerdesc.jitcounter def execute_assembler(loop_token, *args): # Call the backend to run the 'looptoken' with the given @@ -304,74 +312,72 @@ # assert 0, "should have raised" - def bound_reached(cell, *args): - # bound reached, but we do a last check: if it is the first - # time we reach the bound, or if another loop or bridge was - # compiled since the last time we reached it, then decrease - # the counter by a few percents instead. It should avoid - # sudden bursts of JIT-compilation, and also corner cases - # where we suddenly compile more than one loop because all - # counters reach the bound at the same time, but where - # compiling all but the first one is pointless. 
- curgen = warmrunnerdesc.memory_manager.current_generation - curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits - if we_are_translated() and curgen != cell.extra_delay: - cell.counter = int(self.THRESHOLD_LIMIT * 0.98) - cell.extra_delay = curgen + def bound_reached(index, cell, *args): + if not confirm_enter_jit(*args): return - # - if not confirm_enter_jit(*args): - cell.counter = 0 - return + jitcounter.decay_all_counters() # start tracing from rpython.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 + greenargs = args[:num_green_args] + if cell is None: + cell = JitCell(*greenargs) + jitcounter.install_new_cell(index, cell) + cell.flags |= JC_TRACING try: metainterp.compile_and_run_once(jitdriver_sd, *args) finally: - if cell.counter == -2: - cell.counter = 0 + cell.flags &= ~JC_TRACING - def maybe_compile_and_run(threshold, *args): + def maybe_compile_and_run(increment_threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. """ - # look for the cell corresponding to the current greenargs + # Look for the cell corresponding to the current greenargs. + # Search for the JitCell that is of the correct subclass of + # BaseJitCell, and that stores a key that compares equal. + # These few lines inline some logic that is also on the + # JitCell class, to avoid computing the hash several times. greenargs = args[:num_green_args] - cell = get_jitcell(True, *greenargs) + index = JitCell.get_index(*greenargs) + cell = jitcounter.lookup_chain(index) + while cell is not None: + if isinstance(cell, JitCell) and cell.comparekey(*greenargs): + break # found + cell = cell.next + else: + # not found. 
increment the counter + if jitcounter.tick(index, increment_threshold): + bound_reached(index, None, *args) + return - if cell.counter >= 0: - # update the profiling counter - n = cell.counter + threshold - if n <= self.THRESHOLD_LIMIT: # bound not reached - cell.counter = n - return - else: - bound_reached(cell, *args) - return - else: - if cell.counter != -1: - assert cell.counter == -2 + # Here, we have found 'cell'. + # + if cell.flags & (JC_TRACING | JC_TEMPORARY): + if cell.flags & JC_TRACING: # tracing already happening in some outer invocation of # this function. don't trace a second time. return - if not confirm_enter_jit(*args): - return - # machine code was already compiled for these greenargs - procedure_token = cell.get_procedure_token() - if procedure_token is None: # it was a weakref that has been freed - cell.counter = 0 - return - # extract and unspecialize the red arguments to pass to - # the assembler - execute_args = () - for i in range_red_args: - execute_args += (unspecialize_value(args[i]), ) - # run it! this executes until interrupted by an exception - execute_assembler(procedure_token, *execute_args) - # + # attached by compile_tmp_callback(). count normally + if jitcounter.tick(index, increment_threshold): + bound_reached(index, cell, *args) + return + # machine code was already compiled for these greenargs + procedure_token = cell.get_procedure_token() + if procedure_token is None: + # it was an aborted compilation, or maybe a weakref that + # has been freed + jitcounter.cleanup_chain(index) + return + if not confirm_enter_jit(*args): + return + # extract and unspecialize the red arguments to pass to + # the assembler + execute_args = () + for i in range_red_args: + execute_args += (unspecialize_value(args[i]), ) + # run it! 
this executes until interrupted by an exception + execute_assembler(procedure_token, *execute_args) assert 0, "should not reach this point" maybe_compile_and_run._dont_inline_ = True @@ -406,144 +412,77 @@ # ---------- - def make_jitcell_getter(self): + def make_jitcell_subclass(self): "NOT_RPYTHON" - if hasattr(self, 'jit_getter'): - return self.jit_getter + if hasattr(self, 'JitCell'): + return self.JitCell # - if self.jitdriver_sd._get_jitcell_at_ptr is None: - jit_getter = self._make_jitcell_getter_default() - else: - jit_getter = self._make_jitcell_getter_custom() + jitcounter = self.warmrunnerdesc.jitcounter + jitdriver_sd = self.jitdriver_sd + green_args_name_spec = unrolling_iterable([('g%d' % i, TYPE) + for i, TYPE in enumerate(jitdriver_sd._green_args_spec)]) + unwrap_greenkey = self.make_unwrap_greenkey() + random_initial_value = hash(self) # - unwrap_greenkey = self.make_unwrap_greenkey() + class JitCell(BaseJitCell): + def __init__(self, *greenargs): + i = 0 + for attrname, _ in green_args_name_spec: + setattr(self, attrname, greenargs[i]) + i = i + 1 + + def comparekey(self, *greenargs2): + i = 0 + for attrname, TYPE in green_args_name_spec: + item1 = getattr(self, attrname) + if not equal_whatever(TYPE, item1, greenargs2[i]): + return False + i = i + 1 + return True + + @staticmethod + def get_index(*greenargs): + x = random_initial_value + i = 0 + for _, TYPE in green_args_name_spec: + item = greenargs[i] + y = hash_whatever(TYPE, item) + x = intmask((x ^ y) * 1405695061) # prime number, 2**30~31 + i = i + 1 + return jitcounter.get_index(x) + + @staticmethod + def get_jitcell(*greenargs): + index = JitCell.get_index(*greenargs) + cell = jitcounter.lookup_chain(index) + while cell is not None: + if (isinstance(cell, JitCell) and + cell.comparekey(*greenargs)): + return cell + cell = cell.next + return None + + @staticmethod + def get_jit_cell_at_key(greenkey): + greenargs = unwrap_greenkey(greenkey) + return JitCell.get_jitcell(*greenargs) + + 
@staticmethod + def ensure_jit_cell_at_key(greenkey): + greenargs = unwrap_greenkey(greenkey) + index = JitCell.get_index(*greenargs) + cell = jitcounter.lookup_chain(index) + while cell is not None: + if (isinstance(cell, JitCell) and + cell.comparekey(*greenargs)): + return cell + cell = cell.next + newcell = JitCell(*greenargs) + jitcounter.install_new_cell(index, newcell) + return newcell # - def jit_cell_at_key(greenkey): - greenargs = unwrap_greenkey(greenkey) - return jit_getter(True, *greenargs) - self.jit_cell_at_key = jit_cell_at_key - self.jit_getter = jit_getter - # - return jit_getter - - def _make_jitcell_getter_default(self): - "NOT_RPYTHON" - jitdriver_sd = self.jitdriver_sd - green_args_spec = unrolling_iterable(jitdriver_sd._green_args_spec) - # - def comparekey(greenargs1, greenargs2): - i = 0 - for TYPE in green_args_spec: - if not equal_whatever(TYPE, greenargs1[i], greenargs2[i]): - return False - i = i + 1 - return True - # - def hashkey(greenargs): - x = 0x345678 - i = 0 - for TYPE in green_args_spec: - item = greenargs[i] - y = hash_whatever(TYPE, item) - x = intmask((1000003 * x) ^ y) - i = i + 1 - return x - # - jitcell_dict = r_dict(comparekey, hashkey) - try: - self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict) - except AttributeError: - pass - # - def _cleanup_dict(): - minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.counter = int(cell.counter * 0.92) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): - killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # Once in a while, rarely, when too many entries have - # been put in the jitdict_dict, we do a cleanup phase: - # we decay all counters and kill entries with a too - # low counter. 
- self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - # - def get_jitcell(build, *greenargs): - try: - cell = jitcell_dict[greenargs] - except KeyError: - if not build: - return None - _maybe_cleanup_dict() - cell = JitCell() - jitcell_dict[greenargs] = cell - return cell - return get_jitcell - - def _make_jitcell_getter_custom(self): - "NOT_RPYTHON" - rtyper = self.warmrunnerdesc.rtyper - get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr - set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr - lltohlhack = {} - # note that there is no equivalent of _maybe_cleanup_dict() - # in the case of custom getters. We assume that the interpreter - # stores the JitCells on some objects that can go away by GC, - # like the PyCode objects in PyPy. - # - def get_jitcell(build, *greenargs): - fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) - cellref = fn(*greenargs) - # - if we_are_translated(): - BASEJITCELL = lltype.typeOf(cellref) - cell = cast_base_ptr_to_instance(JitCell, cellref) - else: - if isinstance(cellref, (BaseJitCell, type(None))): - BASEJITCELL = None - cell = cellref - else: - BASEJITCELL = lltype.typeOf(cellref) - if cellref: - cell = lltohlhack[rtyper.type_system.deref(cellref)] - else: - cell = None - if not build: - return cell - if cell is None: - cell = JitCell() - # - if we_are_translated(): - cellref = cast_object_to_ptr(BASEJITCELL, cell) - else: - if BASEJITCELL is None: - cellref = cell - else: - if isinstance(BASEJITCELL, lltype.Ptr): - cellref = lltype.malloc(BASEJITCELL.TO) - else: - assert False, "no clue" - lltohlhack[rtyper.type_system.deref(cellref)] = cell - # - fn = support.maybe_on_top_of_llinterp(rtyper, - set_jitcell_at_ptr) - fn(cellref, *greenargs) - return cell - return get_jitcell + self.JitCell = JitCell + return JitCell # 
---------- @@ -553,15 +492,15 @@ # warmrunnerdesc = self.warmrunnerdesc unwrap_greenkey = self.make_unwrap_greenkey() - jit_getter = self.make_jitcell_getter() + JitCell = self.make_jitcell_subclass() jd = self.jitdriver_sd cpu = self.cpu def can_inline_greenargs(*greenargs): if can_never_inline(*greenargs): return False - cell = jit_getter(False, *greenargs) - if cell is not None and cell.dont_trace_here: + cell = JitCell.get_jitcell(*greenargs) + if cell is not None and (cell.flags & JC_DONT_TRACE_HERE) != 0: return False return True def can_inline_callable(greenkey): @@ -585,16 +524,14 @@ redargtypes = ''.join([kind[0] for kind in jd.red_args_types]) def get_assembler_token(greenkey): - cell = self.jit_cell_at_key(greenkey) + cell = JitCell.ensure_jit_cell_at_key(greenkey) procedure_token = cell.get_procedure_token() if procedure_token is None: from rpython.jit.metainterp.compile import compile_tmp_callback - if cell.counter == -1: # used to be a valid entry bridge, - cell.counter = 0 # but was freed in the meantime. memmgr = warmrunnerdesc.memory_manager procedure_token = compile_tmp_callback(cpu, jd, greenkey, redargtypes, memmgr) - cell.set_procedure_token(procedure_token) + cell.set_procedure_token(procedure_token, tmp=True) return procedure_token self.get_assembler_token = get_assembler_token diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1459,6 +1459,9 @@ self.get_total_memory_used()) if self.DEBUG >= 2: self.debug_check_consistency() # expensive! 
+ # + self.root_walker.finished_minor_collection() + # debug_stop("gc-minor") diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -60,6 +60,9 @@ def _walk_prebuilt_gc(self, callback): pass + def finished_minor_collection(self): + pass + class BaseDirectGCTest(object): GC_PARAMS = {} diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -142,8 +142,11 @@ if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + finished_minor_collection = translator._jit2gc.get( + 'invoke_after_minor_collection', None) else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) + finished_minor_collection = None self.layoutbuilder.transformer = self self.get_type_id = self.layoutbuilder.get_type_id @@ -167,6 +170,7 @@ gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS) root_walker = self.build_root_walker() + root_walker.finished_minor_collection_func = finished_minor_collection self.root_walker = root_walker gcdata.set_query_functions(gcdata.gc) gcdata.gc.set_root_walker(root_walker) @@ -1285,6 +1289,7 @@ class BaseRootWalker(object): thread_setup = None + finished_minor_collection_func = None def __init__(self, gctransformer): self.gcdata = gctransformer.gcdata @@ -1322,6 +1327,11 @@ if collect_stack_root: self.walk_stack_roots(collect_stack_root) # abstract + def finished_minor_collection(self): + func = self.finished_minor_collection_func + if func is not None: + func() + def need_stacklet_support(self): raise Exception("%s does not support stacklets" % ( self.__class__.__name__,)) diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py --- a/rpython/memory/gcwrapper.py +++ b/rpython/memory/gcwrapper.py @@ -195,6 +195,9 @@ for obj in 
self.gcheap._all_prebuilt_gc: collect(llmemory.cast_ptr_to_adr(obj._as_ptr())) + def finished_minor_collection(self): + pass + class DirectRunLayoutBuilder(gctypelayout.TypeLayoutBuilder): diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -45,6 +45,8 @@ taggedpointers = False def setup_class(cls): + cls.marker = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', zero=True) funcs0 = [] funcs2 = [] cleanups = [] @@ -744,12 +746,18 @@ def ensure_layoutbuilder(cls, translator): jit2gc = getattr(translator, '_jit2gc', None) if jit2gc: + assert 'invoke_after_minor_collection' in jit2gc return jit2gc['layoutbuilder'] + marker = cls.marker GCClass = cls.gcpolicy.transformerclass.GCClass layoutbuilder = framework.TransformerLayoutBuilder(translator, GCClass) layoutbuilder.delay_encoding() + + def seeme(): + marker[0] += 1 translator._jit2gc = { 'layoutbuilder': layoutbuilder, + 'invoke_after_minor_collection': seeme, } return layoutbuilder @@ -768,6 +776,15 @@ g() i += 1 return 0 + + if cls.gcname == 'incminimark': + marker = cls.marker + def cleanup(): + assert marker[0] > 0 + marker[0] = 0 + else: + cleanup = None + def fix_graph_of_g(translator): from rpython.translator.translator import graphof from rpython.flowspace.model import Constant @@ -788,7 +805,7 @@ break else: assert 0, "oups, not found" - return f, None, fix_graph_of_g + return f, cleanup, fix_graph_of_g def test_do_malloc_operations(self): run = self.runner("do_malloc_operations") diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -442,6 +442,7 @@ 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', 'trace_eagerness': 'number of times a guard has to fail before we start compiling 
a bridge', + 'decay': 'amount to regularly decay counters by (0=none, 1000=max)', 'trace_limit': 'number of recorded operations before we abort tracing with ABORT_TOO_LONG', 'inlining': 'inline python functions or not (1/0)', 'loop_longevity': 'a parameter controlling how long loops will be kept before being freed, an estimate', @@ -455,6 +456,7 @@ PARAMETERS = {'threshold': 1039, # just above 1024, prime 'function_threshold': 1619, # slightly more than one above, also prime 'trace_eagerness': 200, + 'decay': 40, 'trace_limit': 6000, 'inlining': 1, 'loop_longevity': 1000, @@ -513,8 +515,8 @@ if '.' not in name]) self._heuristic_order = {} # check if 'reds' and 'greens' are ordered self._make_extregistryentries() - self.get_jitcell_at = get_jitcell_at - self.set_jitcell_at = set_jitcell_at + assert get_jitcell_at is None, "get_jitcell_at no longer used" + assert set_jitcell_at is None, "set_jitcell_at no longer used" self.get_printable_location = get_printable_location self.confirm_enter_jit = confirm_enter_jit self.can_never_inline = can_never_inline @@ -694,9 +696,6 @@ # # Annotation and rtyping of some of the JitDriver methods -class BaseJitCell(object): - __slots__ = () - class ExtEnterLeaveMarker(ExtRegistryEntry): # Replace a call to myjitdriver.jit_merge_point(**livevars) @@ -744,10 +743,7 @@ def annotate_hooks(self, **kwds_s): driver = self.instance.im_self - s_jitcell = self.bookkeeper.valueoftype(BaseJitCell) h = self.annotate_hook - h(driver.get_jitcell_at, driver.greens, **kwds_s) - h(driver.set_jitcell_at, driver.greens, [s_jitcell], **kwds_s) h(driver.get_printable_location, driver.greens, **kwds_s) def annotate_hook(self, func, variables, args_s=[], **kwds_s): From noreply at buildbot.pypy.org Fri Nov 1 15:17:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 15:17:47 +0100 (CET) Subject: [pypy-commit] pypy default: Document the branch. 
Message-ID: <20131101141747.ACD011C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67794:57c30d4d18dd Date: 2013-11-01 15:16 +0100 http://bitbucket.org/pypy/pypy/changeset/57c30d4d18dd/ Log: Document the branch. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -124,3 +124,9 @@ .. branch: remove-numpypy Remove lib_pypy/numpypy in favor of external numpy fork + +.. branch: jit-counter +Tweak the jit counters: decay them at minor collection (actually +only every 32 minor collection is enough). Should avoid the "memory +leaks" observed in long-running processes, actually created by the +jit compiling more and more rarely executed paths. From noreply at buildbot.pypy.org Fri Nov 1 18:27:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 18:27:58 +0100 (CET) Subject: [pypy-commit] pypy default: Tweak the Makefile to run "rpython" using python rather than pypy, if Message-ID: <20131101172758.323311C0112@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67795:f6ace317e67d Date: 2013-11-01 18:23 +0100 http://bitbucket.org/pypy/pypy/changeset/f6ace317e67d/ Log: Tweak the Makefile to run "rpython" using python rather than pypy, if there is no pypy installed. Gives a proper warning in this case. 
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,29 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "=============================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes around 45 minutes and $(URAM) GB of RAM." +endif + @echo "=============================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py From noreply at buildbot.pypy.org Fri Nov 1 18:27:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 18:27:59 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131101172759.958D41C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67796:12503b1558cc Date: 2013-11-01 18:25 +0100 http://bitbucket.org/pypy/pypy/changeset/12503b1558cc/ Log: merge heads diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -124,3 +124,9 @@ .. branch: remove-numpypy Remove lib_pypy/numpypy in favor of external numpy fork + +.. branch: jit-counter +Tweak the jit counters: decay them at minor collection (actually +only every 32 minor collection is enough). 
Should avoid the "memory +leaks" observed in long-running processes, actually created by the +jit compiling more and more rarely executed paths. diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -32,22 +32,6 @@ name = opcode_method_names[ord(bytecode.co_code[next_instr])] return '%s #%d %s' % (bytecode.get_repr(), next_instr, name) -def make_greenkey_dict_key(next_instr, is_being_profiled): - # use only uints as keys in the jit_cells dict, rather than - # a tuple (next_instr, is_being_profiled) - return ( - (next_instr << 1) | - r_uint(intmask(is_being_profiled)) - ) - -def get_jitcell_at(next_instr, is_being_profiled, bytecode): - key = make_greenkey_dict_key(next_instr, is_being_profiled) - return bytecode.jit_cells.get(key, None) - -def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): - key = make_greenkey_dict_key(next_instr, is_being_profiled) - bytecode.jit_cells[key] = newcell - def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 @@ -58,8 +42,6 @@ virtualizables = ['frame'] pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, - get_jitcell_at = get_jitcell_at, - set_jitcell_at = set_jitcell_at, should_unroll_one_iteration = should_unroll_one_iteration, name='pypyjit') @@ -121,18 +103,6 @@ return intmask(decr_by) -PyCode__initialize = PyCode._initialize - -class __extend__(PyCode): - __metaclass__ = extendabletype - - def _initialize(self): - PyCode__initialize(self) - self.jit_cells = {} - - def _cleanup_(self): - self.jit_cells = {} - # ____________________________________________________________ # # Public interface diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -369,7 +369,9 @@ translator = self.translator 
self.layoutbuilder = framework.TransformerLayoutBuilder(translator) self.layoutbuilder.delay_encoding() - translator._jit2gc = {'layoutbuilder': self.layoutbuilder} + if not hasattr(translator, '_jit2gc'): + translator._jit2gc = {} + translator._jit2gc['layoutbuilder'] = self.layoutbuilder def _setup_gcclass(self): from rpython.memory.gcheader import GCHeaderBuilder diff --git a/rpython/jit/codewriter/longlong.py b/rpython/jit/codewriter/longlong.py --- a/rpython/jit/codewriter/longlong.py +++ b/rpython/jit/codewriter/longlong.py @@ -25,6 +25,7 @@ getfloatstorage = lambda x: x getrealfloat = lambda x: x gethash = compute_hash + gethash_fast = longlong2float.float2longlong is_longlong = lambda TYPE: False # ------------------------------------- @@ -40,6 +41,7 @@ getfloatstorage = longlong2float.float2longlong getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) + gethash_fast = gethash is_longlong = lambda TYPE: (TYPE is lltype.SignedLongLong or TYPE is lltype.UnsignedLongLong) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1,8 +1,9 @@ import weakref -from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_start, debug_stop, debug_print +from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib import rstack from rpython.rlib.jit import JitDebugInfo, Counters, dont_look_inside from rpython.conftest import option @@ -483,11 +484,7 @@ pass class ResumeGuardDescr(ResumeDescr): - _counter = 0 # on a GUARD_VALUE, there is one counter per value; - _counters = None # they get stored in _counters then. 
- # this class also gets the following attributes stored by resume.py code - # XXX move all of unused stuff to guard_op, now that we can have # a separate class, so it does not survive that long rd_snapshot = None @@ -498,18 +495,26 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - CNT_BASE_MASK = 0x0FFFFFFF # the base counter value - CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard - CNT_TYPE_MASK = 0x60000000 # mask for the type + status = r_uint(0) - CNT_INT = 0x20000000 - CNT_REF = 0x40000000 - CNT_FLOAT = 0x60000000 + ST_BUSY_FLAG = 0x01 # if set, busy tracing from the guard + ST_TYPE_MASK = 0x06 # mask for the type (TY_xxx) + ST_SHIFT = 3 # in "status >> ST_SHIFT" is stored: + # - if TY_NONE, the jitcounter index directly + # - otherwise, the guard_value failarg index + TY_NONE = 0x00 + TY_INT = 0x02 + TY_REF = 0x04 + TY_FLOAT = 0x06 - def store_final_boxes(self, guard_op, boxes): + def store_final_boxes(self, guard_op, boxes, metainterp_sd): guard_op.setfailargs(boxes) self.rd_count = len(boxes) self.guard_opnum = guard_op.getopnum() + # + if metainterp_sd.warmrunnerdesc is not None: # for tests + jitcounter = metainterp_sd.warmrunnerdesc.jitcounter + self.status = jitcounter.fetch_next_index() << self.ST_SHIFT def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE @@ -519,18 +524,15 @@ except ValueError: return # xxx probably very rare else: - if i > self.CNT_BASE_MASK: - return # probably never, but better safe than sorry if box.type == history.INT: - cnt = self.CNT_INT + ty = self.TY_INT elif box.type == history.REF: - cnt = self.CNT_REF + ty = self.TY_REF elif box.type == history.FLOAT: - cnt = self.CNT_FLOAT + ty = self.TY_FLOAT else: assert 0, box.type - assert cnt > self.CNT_BASE_MASK - self._counter = cnt | i + self.status = ty | (r_uint(i) << self.ST_SHIFT) def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): if self.must_compile(deadframe, 
metainterp_sd, jitdriver_sd): @@ -557,65 +559,60 @@ _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, deadframe, metainterp_sd, jitdriver_sd): - trace_eagerness = jitdriver_sd.warmstate.trace_eagerness + jitcounter = metainterp_sd.warmrunnerdesc.jitcounter # - if self._counter <= self.CNT_BASE_MASK: - # simple case: just counting from 0 to trace_eagerness - self._counter += 1 - return self._counter >= trace_eagerness + if self.status & (self.ST_BUSY_FLAG | self.ST_TYPE_MASK) == 0: + # common case: this is not a guard_value, and we are not + # already busy tracing. The rest of self.status stores a + # valid per-guard index in the jitcounter. + index = self.status >> self.ST_SHIFT # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. - elif self._counter & self.CNT_BUSY_FLAG: + elif self.status & self.ST_BUSY_FLAG: return False # - else: # we have a GUARD_VALUE that fails. Make a _counters instance - # (only now, when the guard is actually failing at least once), - # and use it to record some statistics about the failing values. 
- index = self._counter & self.CNT_BASE_MASK - typetag = self._counter & self.CNT_TYPE_MASK - counters = self._counters - if typetag == self.CNT_INT: - intvalue = metainterp_sd.cpu.get_int_value( - deadframe, index) - if counters is None: - self._counters = counters = ResumeGuardCountersInt() - else: - assert isinstance(counters, ResumeGuardCountersInt) - counter = counters.see_int(intvalue) - elif typetag == self.CNT_REF: - refvalue = metainterp_sd.cpu.get_ref_value( - deadframe, index) - if counters is None: - self._counters = counters = ResumeGuardCountersRef() - else: - assert isinstance(counters, ResumeGuardCountersRef) - counter = counters.see_ref(refvalue) - elif typetag == self.CNT_FLOAT: - floatvalue = metainterp_sd.cpu.get_float_value( - deadframe, index) - if counters is None: - self._counters = counters = ResumeGuardCountersFloat() - else: - assert isinstance(counters, ResumeGuardCountersFloat) - counter = counters.see_float(floatvalue) + else: # we have a GUARD_VALUE that fails. 
+ from rpython.rlib.objectmodel import current_object_addr_as_int + + index = intmask(self.status >> self.ST_SHIFT) + typetag = intmask(self.status & self.ST_TYPE_MASK) + + # fetch the actual value of the guard_value, possibly turning + # it to an integer + if typetag == self.TY_INT: + intval = metainterp_sd.cpu.get_int_value(deadframe, index) + elif typetag == self.TY_REF: + refval = metainterp_sd.cpu.get_ref_value(deadframe, index) + intval = lltype.cast_ptr_to_int(refval) + elif typetag == self.TY_FLOAT: + floatval = metainterp_sd.cpu.get_float_value(deadframe, index) + intval = longlong.gethash_fast(floatval) else: assert 0, typetag - return counter >= trace_eagerness + + if not we_are_translated(): + if isinstance(intval, llmemory.AddressAsInt): + intval = llmemory.cast_adr_to_int( + llmemory.cast_int_to_adr(intval), "forced") + + hash = (current_object_addr_as_int(self) * 777767777 + + intval * 1442968193) + index = jitcounter.get_index(hash) + # + increment = jitdriver_sd.warmstate.increment_trace_eagerness + return jitcounter.tick(index, increment) def start_compiling(self): # start tracing and compiling from this guard. - self._counter |= self.CNT_BUSY_FLAG + self.status |= self.ST_BUSY_FLAG def done_compiling(self): - # done tracing and compiling from this guard. Either the bridge has - # been successfully compiled, in which case whatever value we store - # in self._counter will not be seen any more, or not, in which case - # we should reset the counter to 0, in order to wait a bit until the - # next attempt. - if self._counter >= 0: - self._counter = 0 - self._counters = None + # done tracing and compiling from this guard. Note that if the + # bridge has not been successfully compiled, the jitcounter for + # it was reset to 0 already by jitcounter.tick() and not + # incremented at all as long as ST_BUSY_FLAG was set. + self.status &= ~self.ST_BUSY_FLAG def compile_and_attach(self, metainterp, new_loop): # We managed to create a bridge. 
Attach the new operations @@ -745,69 +742,6 @@ return res -class AbstractResumeGuardCounters(object): - # Completely custom algorithm for now: keep 5 pairs (value, counter), - # and when we need more, we discard the middle pair (middle in the - # current value of the counter). That way, we tend to keep the - # values with a high counter, but also we avoid always throwing away - # the most recently added value. **THIS ALGO MUST GO AWAY AT SOME POINT** - pass - -def _see(self, newvalue): - # find and update an existing counter - unused = -1 - for i in range(5): - cnt = self.counters[i] - if cnt: - if self.values[i] == newvalue: - cnt += 1 - self.counters[i] = cnt - return cnt - else: - unused = i - # not found. Use a previously unused entry, if there is one - if unused >= 0: - self.counters[unused] = 1 - self.values[unused] = newvalue - return 1 - # no unused entry. Overwrite the middle one. Computed with indices - # a, b, c meaning the highest, second highest, and third highest - # entries. 
- a = 0 - b = c = -1 - for i in range(1, 5): - if self.counters[i] > self.counters[a]: - c = b - b = a - a = i - elif b < 0 or self.counters[i] > self.counters[b]: - c = b - b = i - elif c < 0 or self.counters[i] > self.counters[c]: - c = i - self.counters[c] = 1 - self.values[c] = newvalue - return 1 - -class ResumeGuardCountersInt(AbstractResumeGuardCounters): - def __init__(self): - self.counters = [0] * 5 - self.values = [0] * 5 - see_int = func_with_new_name(_see, 'see_int') - -class ResumeGuardCountersRef(AbstractResumeGuardCounters): - def __init__(self): - self.counters = [0] * 5 - self.values = [history.ConstPtr.value] * 5 - see_ref = func_with_new_name(_see, 'see_ref') - -class ResumeGuardCountersFloat(AbstractResumeGuardCounters): - def __init__(self): - self.counters = [0] * 5 - self.values = [longlong.ZEROF] * 5 - see_float = func_with_new_name(_see, 'see_float') - - class ResumeFromInterpDescr(ResumeDescr): def __init__(self, original_greenkey): self.original_greenkey = original_greenkey diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/counter.py @@ -0,0 +1,153 @@ +from rpython.rlib.rarithmetic import r_singlefloat, r_uint +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + + +r_uint32 = rffi.r_uint +assert r_uint32.BITS == 32 +UINT32MAX = 2 ** 32 - 1 + + +class JitCounter: + DEFAULT_SIZE = 4096 + + def __init__(self, size=DEFAULT_SIZE, translator=None): + "NOT_RPYTHON" + self.size = size + self.shift = 1 + while (UINT32MAX >> self.shift) != size - 1: + self.shift += 1 + assert self.shift < 999, "size is not a power of two <= 2**31" + self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), size, + flavor='raw', zero=True, + track_allocation=False) + self.celltable = [None] * size + self._nextindex = r_uint(0) + # + if translator is not None: + class Glob: + step = 0 + glob = Glob() + 
def invoke_after_minor_collection(): + # After 32 minor collections, we call decay_all_counters(). + # The "--jit decay=N" option measures the amount the + # counters are then reduced by. + glob.step += 1 + if glob.step == 32: + glob.step = 0 + self.decay_all_counters() + if not hasattr(translator, '_jit2gc'): + translator._jit2gc = {} + translator._jit2gc['invoke_after_minor_collection'] = ( + invoke_after_minor_collection) + + def compute_threshold(self, threshold): + """Return the 'increment' value corresponding to the given number.""" + if threshold <= 0: + return 0.0 # no increment, never reach 1.0 + return 1.0 / (threshold - 0.001) + + def get_index(self, hash): + """Return the index (< self.size) from a hash value. This truncates + the hash to 32 bits, and then keep the *highest* remaining bits. + Be sure that hash is computed correctly.""" + hash32 = r_uint(r_uint32(hash)) # mask off the bits higher than 32 + index = hash32 >> self.shift # shift, resulting in a value < size + return index # return the result as a r_uint + get_index._always_inline_ = True + + def fetch_next_index(self): + result = self._nextindex + self._nextindex = (result + 1) & self.get_index(-1) + return result + + def tick(self, index, increment): + counter = float(self.timetable[index]) + increment + if counter < 1.0: + self.timetable[index] = r_singlefloat(counter) + return False + else: + # when the bound is reached, we immediately reset the value to 0.0 + self.reset(index) + return True + tick._always_inline_ = True + + def reset(self, index): + self.timetable[index] = r_singlefloat(0.0) + + def lookup_chain(self, index): + return self.celltable[index] + + def cleanup_chain(self, index): + self.reset(index) + self.install_new_cell(index, None) + + def install_new_cell(self, index, newcell): + cell = self.celltable[index] + keep = newcell + while cell is not None: + nextcell = cell.next + if not cell.should_remove_jitcell(): + cell.next = keep + keep = cell + cell = nextcell + 
self.celltable[index] = keep + + def set_decay(self, decay): + """Set the decay, from 0 (none) to 1000 (max).""" + if decay < 0: + decay = 0 + elif decay > 1000: + decay = 1000 + self.decay_by_mult = 1.0 - (decay * 0.001) + + def decay_all_counters(self): + # Called during a minor collection by the GC, to gradually decay + # counters that didn't reach their maximum. Thus if a counter + # is incremented very slowly, it will never reach the maximum. + # This avoids altogether the JIT compilation of rare paths. + # We also call this function when any maximum bound is reached, + # to avoid sudden bursts of JIT-compilation (the next one will + # not reach the maximum bound immmediately after). This is + # important in corner cases where we would suddenly compile more + # than one loop because all counters reach the bound at the same + # time, but where compiling all but the first one is pointless. + size = self.size + pypy__decay_jit_counters(self.timetable, self.decay_by_mult, size) + + +# this function is written directly in C; gcc will optimize it using SSE +eci = ExternalCompilationInfo(post_include_bits=[""" +static void pypy__decay_jit_counters(float table[], double f1, long size1) { + float f = (float)f1; + int i, size = (int)size1; + for (i=0; i> (32 - 7)) + +def test_fetch_next_index(): + jc = JitCounter(size=4) + lst = [jc.fetch_next_index() for i in range(10)] + assert lst == [0, 1, 2, 3, 0, 1, 2, 3, 0, 1] + +def test_tick(): + jc = JitCounter() + incr = jc.compute_threshold(4) + for i in range(5): + r = jc.tick(104, incr) + assert r is (i == 3) + for i in range(5): + r = jc.tick(108, incr) + s = jc.tick(109, incr) + assert r is (i == 3) + assert s is (i == 3) + jc.reset(108) + for i in range(5): + r = jc.tick(108, incr) + assert r is (i == 3) + +def test_install_new_chain(): + class Dead: + next = None + def should_remove_jitcell(self): + return True + class Alive: + next = None + def should_remove_jitcell(self): + return False + # + jc = JitCounter() + 
assert jc.lookup_chain(104) is None + d1 = Dead() + jc.install_new_cell(104, d1) + assert jc.lookup_chain(104) is d1 + d2 = Dead() + jc.install_new_cell(104, d2) + assert jc.lookup_chain(104) is d2 + assert d2.next is None + # + d3 = Alive() + jc.install_new_cell(104, d3) + assert jc.lookup_chain(104) is d3 + assert d3.next is None + d4 = Alive() + jc.install_new_cell(104, d4) + assert jc.lookup_chain(104) is d3 + assert d3.next is d4 + assert d4.next is None diff --git a/rpython/jit/metainterp/test/test_memmgr.py b/rpython/jit/metainterp/test/test_memmgr.py --- a/rpython/jit/metainterp/test/test_memmgr.py +++ b/rpython/jit/metainterp/test/test_memmgr.py @@ -15,7 +15,7 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.jit.metainterp.warmspot import get_stats -from rpython.jit.metainterp.warmstate import JitCell +from rpython.jit.metainterp.warmstate import BaseJitCell from rpython.rlib import rgc class FakeLoopToken: @@ -87,15 +87,15 @@ # these tests to pass. But we dont want it there always since that will # make all other tests take forever. 
def setup_class(cls): - original_get_procedure_token = JitCell.get_procedure_token + original_get_procedure_token = BaseJitCell.get_procedure_token def get_procedure_token(self): rgc.collect(); return original_get_procedure_token(self) - JitCell.get_procedure_token = get_procedure_token + BaseJitCell.get_procedure_token = get_procedure_token cls.original_get_procedure_token = original_get_procedure_token def teardown_class(cls): - JitCell.get_procedure_token = cls.original_get_procedure_token + BaseJitCell.get_procedure_token = cls.original_get_procedure_token def test_loop_kept_alive(self): myjitdriver = JitDriver(greens=[], reds=['n']) diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -342,7 +342,7 @@ assert res == 0 self.check_max_trace_length(TRACE_LIMIT) self.check_enter_count_at_most(10) # maybe - self.check_aborted_count(7) + self.check_aborted_count(6) def test_trace_limit_bridge(self): def recursive(n): @@ -425,7 +425,7 @@ res = self.meta_interp(loop, [20], failargs_limit=FAILARGS_LIMIT, listops=True) - self.check_aborted_count(5) + self.check_aborted_count(4) def test_max_failure_args_exc(self): FAILARGS_LIMIT = 10 @@ -465,7 +465,7 @@ res = self.meta_interp(main, [20], failargs_limit=FAILARGS_LIMIT, listops=True) assert not res - self.check_aborted_count(5) + self.check_aborted_count(4) def test_set_param_inlining(self): myjitdriver = JitDriver(greens=[], reds=['n', 'recurse']) diff --git a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py --- a/rpython/jit/metainterp/test/test_warmstate.py +++ b/rpython/jit/metainterp/test/test_warmstate.py @@ -3,9 +3,10 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.jit.metainterp.warmstate import wrap, unwrap, specialize_value from rpython.jit.metainterp.warmstate import equal_whatever, 
hash_whatever -from rpython.jit.metainterp.warmstate import WarmEnterState, JitCell +from rpython.jit.metainterp.warmstate import WarmEnterState from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr +from rpython.jit.metainterp.counter import DeterministicJitCounter from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import r_singlefloat @@ -77,69 +78,6 @@ interpret(fn, [42], type_system='lltype') -def test_make_jitcell_getter_default(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed, lltype.Float] - state = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = state._make_jitcell_getter_default() - cell1 = get_jitcell(True, 42, 42.5) - assert isinstance(cell1, JitCell) - cell2 = get_jitcell(True, 42, 42.5) - assert cell1 is cell2 - cell3 = get_jitcell(True, 41, 42.5) - assert get_jitcell(False, 42, 0.25) is None - cell4 = get_jitcell(True, 42, 0.25) - assert get_jitcell(False, 42, 0.25) is cell4 - assert cell1 is not cell3 is not cell4 is not cell1 - -def test_make_jitcell_getter(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Float] - _get_jitcell_at_ptr = None - state = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = state.make_jitcell_getter() - cell1 = get_jitcell(True, 1.75) - cell2 = get_jitcell(True, 1.75) - assert cell1 is cell2 - assert get_jitcell is state.make_jitcell_getter() - -def test_make_jitcell_getter_custom(): - from rpython.rtyper.typesystem import LowLevelTypeSystem - class FakeRTyper: - type_system = LowLevelTypeSystem.instance - celldict = {} - def getter(x, y): - return celldict.get((x, y)) - def setter(newcell, x, y): - newcell.x = x - newcell.y = y - celldict[x, y] = newcell - GETTER = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Float], - llmemory.GCREF)) - SETTER = lltype.Ptr(lltype.FuncType([llmemory.GCREF, lltype.Signed, - lltype.Float], lltype.Void)) - class FakeWarmRunnerDesc: - rtyper = 
FakeRTyper() - cpu = None - memory_manager = None - class FakeJitDriverSD: - _get_jitcell_at_ptr = llhelper(GETTER, getter) - _set_jitcell_at_ptr = llhelper(SETTER, setter) - # - state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) - get_jitcell = state._make_jitcell_getter_custom() - cell1 = get_jitcell(True, 5, 42.5) - assert isinstance(cell1, JitCell) - assert cell1.x == 5 - assert cell1.y == 42.5 - cell2 = get_jitcell(True, 5, 42.5) - assert cell2 is cell1 - cell3 = get_jitcell(True, 41, 42.5) - assert get_jitcell(False, 42, 0.25) is None - cell4 = get_jitcell(True, 42, 0.25) - assert get_jitcell(False, 42, 0.25) is cell4 - assert cell1 is not cell3 is not cell4 is not cell1 - def test_make_unwrap_greenkey(): class FakeJitDriverSD: _green_args_spec = [lltype.Signed, lltype.Float] @@ -149,26 +87,11 @@ assert greenargs == (42, 42.5) assert type(greenargs[0]) is int -def test_attach_unoptimized_bridge_from_interp(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed, lltype.Float] - _get_jitcell_at_ptr = None - state = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = state.make_jitcell_getter() - class FakeLoopToken(object): - invalidated = False - looptoken = FakeLoopToken() - state.attach_procedure_to_interp([ConstInt(5), - constfloat(2.25)], - looptoken) - cell1 = get_jitcell(True, 5, 2.25) - assert cell1.counter < 0 - assert cell1.get_procedure_token() is looptoken - def test_make_jitdriver_callbacks_1(): class FakeWarmRunnerDesc: cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] @@ -198,13 +121,13 @@ rtyper = None cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = llhelper(GET_LOCATION, get_location) _confirm_enter_jit_ptr = None _can_never_inline_ptr = None - _get_jitcell_at_ptr = 
None _should_unroll_one_iteration_ptr = None red_args_types = [] state = WarmEnterState(FakeWarmRunnerDesc(), FakeJitDriverSD()) @@ -224,13 +147,13 @@ rtyper = None cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = llhelper(ENTER_JIT, confirm_enter_jit) _can_never_inline_ptr = None - _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None red_args_types = [] @@ -250,13 +173,13 @@ rtyper = None cpu = None memory_manager = None + jitcounter = DeterministicJitCounter() class FakeJitDriverSD: jitdriver = None _green_args_spec = [lltype.Signed, lltype.Float] _get_printable_location_ptr = None _confirm_enter_jit_ptr = None _can_never_inline_ptr = llhelper(CAN_NEVER_INLINE, can_never_inline) - _get_jitcell_at_ptr = None _should_unroll_one_iteration_ptr = None red_args_types = [] @@ -264,52 +187,3 @@ state.make_jitdriver_callbacks() res = state.can_never_inline(5, 42.5) assert res is True - -def test_cleanup_jitcell_dict(): - class FakeJitDriverSD: - _green_args_spec = [lltype.Signed] - # - # Test creating tons of jitcells that remain at 0 - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell1 = get_jitcell(True, -1) - assert len(warmstate._jitcell_dict) == 1 - # - for i in range(1, 20005): - get_jitcell(True, i) # should trigger a clean-up at 20001 - assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - # - # Same test, with one jitcell that has a counter of BASE instead of 0 - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell2 = get_jitcell(True, -2) - cell2.counter = BASE = warmstate.THRESHOLD_LIMIT // 2 # 50% - # - for i in range(0, 20005): - get_jitcell(True, i) - assert len(warmstate._jitcell_dict) == (i % 19999) + 2 - # - assert cell2 in 
warmstate._jitcell_dict.values() - assert cell2.counter == int(BASE * 0.92) # decayed once - # - # Same test, with jitcells that are compiled and freed by the memmgr - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - get_jitcell(True, -1) - # - for i in range(1, 20005): - cell = get_jitcell(True, i) - cell.counter = -1 - cell.wref_procedure_token = None # or a dead weakref, equivalently - assert len(warmstate._jitcell_dict) == (i % 20000) + 1 - # - # Same test, with counter == -2 (rare case, kept alive) - warmstate = WarmEnterState(None, FakeJitDriverSD()) - get_jitcell = warmstate._make_jitcell_getter_default() - cell = get_jitcell(True, -1) - cell.counter = -2 - # - for i in range(1, 20005): - cell = get_jitcell(True, i) - cell.counter = -2 - assert len(warmstate._jitcell_dict) == i + 1 diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -205,6 +205,12 @@ vrefinfo = VirtualRefInfo(self) self.codewriter.setup_vrefinfo(vrefinfo) # + from rpython.jit.metainterp import counter + if self.cpu.translate_support_code: + self.jitcounter = counter.JitCounter(translator=translator) + else: + self.jitcounter = counter.DeterministicJitCounter() + # self.hooks = policy.jithookiface self.make_virtualizable_infos() self.make_driverhook_graphs() @@ -509,21 +515,10 @@ jd._maybe_compile_and_run_fn = maybe_compile_and_run def make_driverhook_graphs(self): - from rpython.rlib.jit import BaseJitCell - bk = self.rtyper.annotator.bookkeeper - classdef = bk.getuniqueclassdef(BaseJitCell) - s_BaseJitCell_or_None = annmodel.SomeInstance(classdef, - can_be_None=True) - s_BaseJitCell_not_None = annmodel.SomeInstance(classdef) s_Str = annmodel.SomeString() # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) for jd in self.jitdrivers_sd: - jd._set_jitcell_at_ptr = self._make_hook_graph(jd, - annhelper, 
jd.jitdriver.set_jitcell_at, annmodel.s_None, - s_BaseJitCell_not_None) - jd._get_jitcell_at_ptr = self._make_hook_graph(jd, - annhelper, jd.jitdriver.get_jitcell_at, s_BaseJitCell_or_None) jd._get_printable_location_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_printable_location, s_Str) jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -4,7 +4,7 @@ from rpython.jit.codewriter import support, heaptracker, longlong from rpython.jit.metainterp import history from rpython.rlib.debug import debug_start, debug_stop, debug_print -from rpython.rlib.jit import PARAMETERS, BaseJitCell +from rpython.rlib.jit import PARAMETERS from rpython.rlib.nonconst import NonConstant from rpython.rlib.objectmodel import specialize, we_are_translated, r_dict from rpython.rlib.rarithmetic import intmask @@ -124,15 +124,14 @@ return rffi.cast(lltype.Signed, x) -class JitCell(BaseJitCell): - # the counter can mean the following things: - # counter >= 0: not yet traced, wait till threshold is reached - # counter == -1: there is an entry bridge for this cell - # counter == -2: tracing is currently going on for this cell - counter = 0 - dont_trace_here = False - extra_delay = chr(0) +JC_TRACING = 0x01 +JC_DONT_TRACE_HERE = 0x02 +JC_TEMPORARY = 0x04 + +class BaseJitCell(object): + flags = 0 # JC_xxx flags wref_procedure_token = None + next = None def get_procedure_token(self): if self.wref_procedure_token is not None: @@ -141,18 +140,28 @@ return token return None - def set_procedure_token(self, token): + def set_procedure_token(self, token, tmp=False): self.wref_procedure_token = self._makeref(token) + if tmp: + self.flags |= JC_TEMPORARY + else: + self.flags &= ~JC_TEMPORARY def _makeref(self, token): assert token is not None return weakref.ref(token) + def should_remove_jitcell(self): + if 
self.get_procedure_token() is not None: + return False # don't remove JitCells with a procedure_token + # don't remove JitCells that are being traced, or JitCells with + # the "don't trace here" flag. Other JitCells can be removed. + return (self.flags & (JC_TRACING | JC_DONT_TRACE_HERE)) == 0 + # ____________________________________________________________ class WarmEnterState(object): - THRESHOLD_LIMIT = sys.maxint // 2 def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" @@ -166,17 +175,13 @@ self.profiler = None # initialize the state with the default values of the # parameters specified in rlib/jit.py - for name, default_value in PARAMETERS.items(): - meth = getattr(self, 'set_param_' + name) - meth(default_value) + if self.warmrunnerdesc is not None: + for name, default_value in PARAMETERS.items(): + meth = getattr(self, 'set_param_' + name) + meth(default_value) def _compute_threshold(self, threshold): - if threshold <= 0: - return 0 # never reach the THRESHOLD_LIMIT - if threshold < 2: - threshold = 2 - return (self.THRESHOLD_LIMIT // threshold) + 1 - # the number is at least 1, and at most about half THRESHOLD_LIMIT + return self.warmrunnerdesc.jitcounter.compute_threshold(threshold) def set_param_threshold(self, threshold): self.increment_threshold = self._compute_threshold(threshold) @@ -185,11 +190,14 @@ self.increment_function_threshold = self._compute_threshold(threshold) def set_param_trace_eagerness(self, value): - self.trace_eagerness = value + self.increment_trace_eagerness = self._compute_threshold(value) def set_param_trace_limit(self, value): self.trace_limit = value + def set_param_decay(self, decay): + self.warmrunnerdesc.jitcounter.set_decay(decay) + def set_param_inlining(self, value): self.inlining = value @@ -230,18 +238,17 @@ self.warmrunnerdesc.memory_manager.max_unroll_loops = value def disable_noninlinable_function(self, greenkey): - cell = self.jit_cell_at_key(greenkey) - cell.dont_trace_here = True + cell = 
self.JitCell.ensure_jit_cell_at_key(greenkey) + cell.flags |= JC_DONT_TRACE_HERE debug_start("jit-disableinlining") loc = self.get_location_str(greenkey) debug_print("disabled inlining", loc) debug_stop("jit-disableinlining") def attach_procedure_to_interp(self, greenkey, procedure_token): - cell = self.jit_cell_at_key(greenkey) + cell = self.JitCell.ensure_jit_cell_at_key(greenkey) old_token = cell.get_procedure_token() cell.set_procedure_token(procedure_token) - cell.counter = -1 # valid procedure bridge attached if old_token is not None: self.cpu.redirect_call_assembler(old_token, procedure_token) # procedure_token is also kept alive by any loop that used @@ -262,7 +269,7 @@ vinfo = jitdriver_sd.virtualizable_info index_of_virtualizable = jitdriver_sd.index_of_virtualizable num_green_args = jitdriver_sd.num_green_args - get_jitcell = self.make_jitcell_getter() + JitCell = self.make_jitcell_subclass() self.make_jitdriver_callbacks() confirm_enter_jit = self.confirm_enter_jit range_red_args = unrolling_iterable( @@ -280,6 +287,7 @@ assert 0, kind func_execute_token = self.cpu.make_execute_token(*ARGS) cpu = self.cpu + jitcounter = self.warmrunnerdesc.jitcounter def execute_assembler(loop_token, *args): # Call the backend to run the 'looptoken' with the given @@ -304,74 +312,72 @@ # assert 0, "should have raised" - def bound_reached(cell, *args): - # bound reached, but we do a last check: if it is the first - # time we reach the bound, or if another loop or bridge was - # compiled since the last time we reached it, then decrease - # the counter by a few percents instead. It should avoid - # sudden bursts of JIT-compilation, and also corner cases - # where we suddenly compile more than one loop because all - # counters reach the bound at the same time, but where - # compiling all but the first one is pointless. 
- curgen = warmrunnerdesc.memory_manager.current_generation - curgen = chr(intmask(curgen) & 0xFF) # only use 8 bits - if we_are_translated() and curgen != cell.extra_delay: - cell.counter = int(self.THRESHOLD_LIMIT * 0.98) - cell.extra_delay = curgen + def bound_reached(index, cell, *args): + if not confirm_enter_jit(*args): return - # - if not confirm_enter_jit(*args): - cell.counter = 0 - return + jitcounter.decay_all_counters() # start tracing from rpython.jit.metainterp.pyjitpl import MetaInterp metainterp = MetaInterp(metainterp_sd, jitdriver_sd) - # set counter to -2, to mean "tracing in effect" - cell.counter = -2 + greenargs = args[:num_green_args] + if cell is None: + cell = JitCell(*greenargs) + jitcounter.install_new_cell(index, cell) + cell.flags |= JC_TRACING try: metainterp.compile_and_run_once(jitdriver_sd, *args) finally: - if cell.counter == -2: - cell.counter = 0 + cell.flags &= ~JC_TRACING - def maybe_compile_and_run(threshold, *args): + def maybe_compile_and_run(increment_threshold, *args): """Entry point to the JIT. Called at the point with the can_enter_jit() hint. """ - # look for the cell corresponding to the current greenargs + # Look for the cell corresponding to the current greenargs. + # Search for the JitCell that is of the correct subclass of + # BaseJitCell, and that stores a key that compares equal. + # These few lines inline some logic that is also on the + # JitCell class, to avoid computing the hash several times. greenargs = args[:num_green_args] - cell = get_jitcell(True, *greenargs) + index = JitCell.get_index(*greenargs) + cell = jitcounter.lookup_chain(index) + while cell is not None: + if isinstance(cell, JitCell) and cell.comparekey(*greenargs): + break # found + cell = cell.next + else: + # not found. 
increment the counter + if jitcounter.tick(index, increment_threshold): + bound_reached(index, None, *args) + return - if cell.counter >= 0: - # update the profiling counter - n = cell.counter + threshold - if n <= self.THRESHOLD_LIMIT: # bound not reached - cell.counter = n - return - else: - bound_reached(cell, *args) - return - else: - if cell.counter != -1: - assert cell.counter == -2 + # Here, we have found 'cell'. + # + if cell.flags & (JC_TRACING | JC_TEMPORARY): + if cell.flags & JC_TRACING: # tracing already happening in some outer invocation of # this function. don't trace a second time. return - if not confirm_enter_jit(*args): - return - # machine code was already compiled for these greenargs - procedure_token = cell.get_procedure_token() - if procedure_token is None: # it was a weakref that has been freed - cell.counter = 0 - return - # extract and unspecialize the red arguments to pass to - # the assembler - execute_args = () - for i in range_red_args: - execute_args += (unspecialize_value(args[i]), ) - # run it! this executes until interrupted by an exception - execute_assembler(procedure_token, *execute_args) - # + # attached by compile_tmp_callback(). count normally + if jitcounter.tick(index, increment_threshold): + bound_reached(index, cell, *args) + return + # machine code was already compiled for these greenargs + procedure_token = cell.get_procedure_token() + if procedure_token is None: + # it was an aborted compilation, or maybe a weakref that + # has been freed + jitcounter.cleanup_chain(index) + return + if not confirm_enter_jit(*args): + return + # extract and unspecialize the red arguments to pass to + # the assembler + execute_args = () + for i in range_red_args: + execute_args += (unspecialize_value(args[i]), ) + # run it! 
this executes until interrupted by an exception + execute_assembler(procedure_token, *execute_args) assert 0, "should not reach this point" maybe_compile_and_run._dont_inline_ = True @@ -406,144 +412,77 @@ # ---------- - def make_jitcell_getter(self): + def make_jitcell_subclass(self): "NOT_RPYTHON" - if hasattr(self, 'jit_getter'): - return self.jit_getter + if hasattr(self, 'JitCell'): + return self.JitCell # - if self.jitdriver_sd._get_jitcell_at_ptr is None: - jit_getter = self._make_jitcell_getter_default() - else: - jit_getter = self._make_jitcell_getter_custom() + jitcounter = self.warmrunnerdesc.jitcounter + jitdriver_sd = self.jitdriver_sd + green_args_name_spec = unrolling_iterable([('g%d' % i, TYPE) + for i, TYPE in enumerate(jitdriver_sd._green_args_spec)]) + unwrap_greenkey = self.make_unwrap_greenkey() + random_initial_value = hash(self) # - unwrap_greenkey = self.make_unwrap_greenkey() + class JitCell(BaseJitCell): + def __init__(self, *greenargs): + i = 0 + for attrname, _ in green_args_name_spec: + setattr(self, attrname, greenargs[i]) + i = i + 1 + + def comparekey(self, *greenargs2): + i = 0 + for attrname, TYPE in green_args_name_spec: + item1 = getattr(self, attrname) + if not equal_whatever(TYPE, item1, greenargs2[i]): + return False + i = i + 1 + return True + + @staticmethod + def get_index(*greenargs): + x = random_initial_value + i = 0 + for _, TYPE in green_args_name_spec: + item = greenargs[i] + y = hash_whatever(TYPE, item) + x = intmask((x ^ y) * 1405695061) # prime number, 2**30~31 + i = i + 1 + return jitcounter.get_index(x) + + @staticmethod + def get_jitcell(*greenargs): + index = JitCell.get_index(*greenargs) + cell = jitcounter.lookup_chain(index) + while cell is not None: + if (isinstance(cell, JitCell) and + cell.comparekey(*greenargs)): + return cell + cell = cell.next + return None + + @staticmethod + def get_jit_cell_at_key(greenkey): + greenargs = unwrap_greenkey(greenkey) + return JitCell.get_jitcell(*greenargs) + + 
@staticmethod + def ensure_jit_cell_at_key(greenkey): + greenargs = unwrap_greenkey(greenkey) + index = JitCell.get_index(*greenargs) + cell = jitcounter.lookup_chain(index) + while cell is not None: + if (isinstance(cell, JitCell) and + cell.comparekey(*greenargs)): + return cell + cell = cell.next + newcell = JitCell(*greenargs) + jitcounter.install_new_cell(index, newcell) + return newcell # - def jit_cell_at_key(greenkey): - greenargs = unwrap_greenkey(greenkey) - return jit_getter(True, *greenargs) - self.jit_cell_at_key = jit_cell_at_key - self.jit_getter = jit_getter - # - return jit_getter - - def _make_jitcell_getter_default(self): - "NOT_RPYTHON" - jitdriver_sd = self.jitdriver_sd - green_args_spec = unrolling_iterable(jitdriver_sd._green_args_spec) - # - def comparekey(greenargs1, greenargs2): - i = 0 - for TYPE in green_args_spec: - if not equal_whatever(TYPE, greenargs1[i], greenargs2[i]): - return False - i = i + 1 - return True - # - def hashkey(greenargs): - x = 0x345678 - i = 0 - for TYPE in green_args_spec: - item = greenargs[i] - y = hash_whatever(TYPE, item) - x = intmask((1000003 * x) ^ y) - i = i + 1 - return x - # - jitcell_dict = r_dict(comparekey, hashkey) - try: - self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict) - except AttributeError: - pass - # - def _cleanup_dict(): - minimum = self.THRESHOLD_LIMIT // 20 # minimum 5% - killme = [] - for key, cell in jitcell_dict.iteritems(): - if cell.counter >= 0: - cell.counter = int(cell.counter * 0.92) - if cell.counter < minimum: - killme.append(key) - elif (cell.counter == -1 - and cell.get_procedure_token() is None): - killme.append(key) - for key in killme: - del jitcell_dict[key] - # - def _maybe_cleanup_dict(): - # Once in a while, rarely, when too many entries have - # been put in the jitdict_dict, we do a cleanup phase: - # we decay all counters and kill entries with a too - # low counter. 
- self._trigger_automatic_cleanup += 1 - if self._trigger_automatic_cleanup > 20000: - self._trigger_automatic_cleanup = 0 - _cleanup_dict() - # - self._trigger_automatic_cleanup = 0 - self._jitcell_dict = jitcell_dict # for tests - # - def get_jitcell(build, *greenargs): - try: - cell = jitcell_dict[greenargs] - except KeyError: - if not build: - return None - _maybe_cleanup_dict() - cell = JitCell() - jitcell_dict[greenargs] = cell - return cell - return get_jitcell - - def _make_jitcell_getter_custom(self): - "NOT_RPYTHON" - rtyper = self.warmrunnerdesc.rtyper - get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr - set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr - lltohlhack = {} - # note that there is no equivalent of _maybe_cleanup_dict() - # in the case of custom getters. We assume that the interpreter - # stores the JitCells on some objects that can go away by GC, - # like the PyCode objects in PyPy. - # - def get_jitcell(build, *greenargs): - fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr) - cellref = fn(*greenargs) - # - if we_are_translated(): - BASEJITCELL = lltype.typeOf(cellref) - cell = cast_base_ptr_to_instance(JitCell, cellref) - else: - if isinstance(cellref, (BaseJitCell, type(None))): - BASEJITCELL = None - cell = cellref - else: - BASEJITCELL = lltype.typeOf(cellref) - if cellref: - cell = lltohlhack[rtyper.type_system.deref(cellref)] - else: - cell = None - if not build: - return cell - if cell is None: - cell = JitCell() - # - if we_are_translated(): - cellref = cast_object_to_ptr(BASEJITCELL, cell) - else: - if BASEJITCELL is None: - cellref = cell - else: - if isinstance(BASEJITCELL, lltype.Ptr): - cellref = lltype.malloc(BASEJITCELL.TO) - else: - assert False, "no clue" - lltohlhack[rtyper.type_system.deref(cellref)] = cell - # - fn = support.maybe_on_top_of_llinterp(rtyper, - set_jitcell_at_ptr) - fn(cellref, *greenargs) - return cell - return get_jitcell + self.JitCell = JitCell + return JitCell # 
---------- @@ -553,15 +492,15 @@ # warmrunnerdesc = self.warmrunnerdesc unwrap_greenkey = self.make_unwrap_greenkey() - jit_getter = self.make_jitcell_getter() + JitCell = self.make_jitcell_subclass() jd = self.jitdriver_sd cpu = self.cpu def can_inline_greenargs(*greenargs): if can_never_inline(*greenargs): return False - cell = jit_getter(False, *greenargs) - if cell is not None and cell.dont_trace_here: + cell = JitCell.get_jitcell(*greenargs) + if cell is not None and (cell.flags & JC_DONT_TRACE_HERE) != 0: return False return True def can_inline_callable(greenkey): @@ -585,16 +524,14 @@ redargtypes = ''.join([kind[0] for kind in jd.red_args_types]) def get_assembler_token(greenkey): - cell = self.jit_cell_at_key(greenkey) + cell = JitCell.ensure_jit_cell_at_key(greenkey) procedure_token = cell.get_procedure_token() if procedure_token is None: from rpython.jit.metainterp.compile import compile_tmp_callback - if cell.counter == -1: # used to be a valid entry bridge, - cell.counter = 0 # but was freed in the meantime. memmgr = warmrunnerdesc.memory_manager procedure_token = compile_tmp_callback(cpu, jd, greenkey, redargtypes, memmgr) - cell.set_procedure_token(procedure_token) + cell.set_procedure_token(procedure_token, tmp=True) return procedure_token self.get_assembler_token = get_assembler_token diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1459,6 +1459,9 @@ self.get_total_memory_used()) if self.DEBUG >= 2: self.debug_check_consistency() # expensive! 
+ # + self.root_walker.finished_minor_collection() + # debug_stop("gc-minor") diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -60,6 +60,9 @@ def _walk_prebuilt_gc(self, callback): pass + def finished_minor_collection(self): + pass + class BaseDirectGCTest(object): GC_PARAMS = {} diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -142,8 +142,11 @@ if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] + finished_minor_collection = translator._jit2gc.get( + 'invoke_after_minor_collection', None) else: self.layoutbuilder = TransformerLayoutBuilder(translator, GCClass) + finished_minor_collection = None self.layoutbuilder.transformer = self self.get_type_id = self.layoutbuilder.get_type_id @@ -167,6 +170,7 @@ gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS) root_walker = self.build_root_walker() + root_walker.finished_minor_collection_func = finished_minor_collection self.root_walker = root_walker gcdata.set_query_functions(gcdata.gc) gcdata.gc.set_root_walker(root_walker) @@ -1285,6 +1289,7 @@ class BaseRootWalker(object): thread_setup = None + finished_minor_collection_func = None def __init__(self, gctransformer): self.gcdata = gctransformer.gcdata @@ -1322,6 +1327,11 @@ if collect_stack_root: self.walk_stack_roots(collect_stack_root) # abstract + def finished_minor_collection(self): + func = self.finished_minor_collection_func + if func is not None: + func() + def need_stacklet_support(self): raise Exception("%s does not support stacklets" % ( self.__class__.__name__,)) diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py --- a/rpython/memory/gcwrapper.py +++ b/rpython/memory/gcwrapper.py @@ -195,6 +195,9 @@ for obj in 
self.gcheap._all_prebuilt_gc: collect(llmemory.cast_ptr_to_adr(obj._as_ptr())) + def finished_minor_collection(self): + pass + class DirectRunLayoutBuilder(gctypelayout.TypeLayoutBuilder): diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -45,6 +45,8 @@ taggedpointers = False def setup_class(cls): + cls.marker = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', zero=True) funcs0 = [] funcs2 = [] cleanups = [] @@ -744,12 +746,18 @@ def ensure_layoutbuilder(cls, translator): jit2gc = getattr(translator, '_jit2gc', None) if jit2gc: + assert 'invoke_after_minor_collection' in jit2gc return jit2gc['layoutbuilder'] + marker = cls.marker GCClass = cls.gcpolicy.transformerclass.GCClass layoutbuilder = framework.TransformerLayoutBuilder(translator, GCClass) layoutbuilder.delay_encoding() + + def seeme(): + marker[0] += 1 translator._jit2gc = { 'layoutbuilder': layoutbuilder, + 'invoke_after_minor_collection': seeme, } return layoutbuilder @@ -768,6 +776,15 @@ g() i += 1 return 0 + + if cls.gcname == 'incminimark': + marker = cls.marker + def cleanup(): + assert marker[0] > 0 + marker[0] = 0 + else: + cleanup = None + def fix_graph_of_g(translator): from rpython.translator.translator import graphof from rpython.flowspace.model import Constant @@ -788,7 +805,7 @@ break else: assert 0, "oups, not found" - return f, None, fix_graph_of_g + return f, cleanup, fix_graph_of_g def test_do_malloc_operations(self): run = self.runner("do_malloc_operations") diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -442,6 +442,7 @@ 'threshold': 'number of times a loop has to run for it to become hot', 'function_threshold': 'number of times a function must run for it to become traced from start', 'trace_eagerness': 'number of times a guard has to fail before we start compiling 
a bridge', + 'decay': 'amount to regularly decay counters by (0=none, 1000=max)', 'trace_limit': 'number of recorded operations before we abort tracing with ABORT_TOO_LONG', 'inlining': 'inline python functions or not (1/0)', 'loop_longevity': 'a parameter controlling how long loops will be kept before being freed, an estimate', @@ -455,6 +456,7 @@ PARAMETERS = {'threshold': 1039, # just above 1024, prime 'function_threshold': 1619, # slightly more than one above, also prime 'trace_eagerness': 200, + 'decay': 40, 'trace_limit': 6000, 'inlining': 1, 'loop_longevity': 1000, @@ -513,8 +515,8 @@ if '.' not in name]) self._heuristic_order = {} # check if 'reds' and 'greens' are ordered self._make_extregistryentries() - self.get_jitcell_at = get_jitcell_at - self.set_jitcell_at = set_jitcell_at + assert get_jitcell_at is None, "get_jitcell_at no longer used" + assert set_jitcell_at is None, "set_jitcell_at no longer used" self.get_printable_location = get_printable_location self.confirm_enter_jit = confirm_enter_jit self.can_never_inline = can_never_inline @@ -694,9 +696,6 @@ # # Annotation and rtyping of some of the JitDriver methods -class BaseJitCell(object): - __slots__ = () - class ExtEnterLeaveMarker(ExtRegistryEntry): # Replace a call to myjitdriver.jit_merge_point(**livevars) @@ -744,10 +743,7 @@ def annotate_hooks(self, **kwds_s): driver = self.instance.im_self - s_jitcell = self.bookkeeper.valueoftype(BaseJitCell) h = self.annotate_hook - h(driver.get_jitcell_at, driver.greens, **kwds_s) - h(driver.set_jitcell_at, driver.greens, [s_jitcell], **kwds_s) h(driver.get_printable_location, driver.greens, **kwds_s) def annotate_hook(self, func, variables, args_s=[], **kwds_s): From noreply at buildbot.pypy.org Fri Nov 1 18:36:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 18:36:26 +0100 (CET) Subject: [pypy-commit] pypy default: Mention the MAKEFLAGS=-j.. 
option Message-ID: <20131101173626.1A7361C0112@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67797:dce0f5c826bb Date: 2013-11-01 18:35 +0100 http://bitbucket.org/pypy/pypy/changeset/dce0f5c826bb/ Log: Mention the MAKEFLAGS=-j.. option diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -23,6 +23,7 @@ @echo "$(PYPY_EXECUTABLE) to run the translation itself." @echo "This takes around 45 minutes and $(URAM) GB of RAM." endif + @echo "If you have N cores, consider setting \`\`MAKEFLAGS=-jN''." @echo "=============================================================" @echo @sleep 5 From noreply at buildbot.pypy.org Fri Nov 1 18:42:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 18:42:02 +0100 (CET) Subject: [pypy-commit] pypy default: Improve the documentation of -jN Message-ID: <20131101174202.D6D4C1C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67798:b37285b80ae9 Date: 2013-11-01 18:41 +0100 http://bitbucket.org/pypy/pypy/changeset/b37285b80ae9/ Log: Improve the documentation of -jN diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ pypy-c: @echo - @echo "=============================================================" + @echo "====================================================================" ifeq ($(PYPY_EXECUTABLE),) @echo "Building a regular (jitting) version of PyPy, using CPython." @echo "This takes around 2 hours and $(URAM) GB of RAM." @@ -23,8 +23,10 @@ @echo "$(PYPY_EXECUTABLE) to run the translation itself." @echo "This takes around 45 minutes and $(URAM) GB of RAM." endif - @echo "If you have N cores, consider setting \`\`MAKEFLAGS=-jN''." 
- @echo "=============================================================" + @echo + @echo "If you have N cores, consider running \`\`MAKEFLAGS=-jN make'' to" + @echo "speed up the last part of translation (\`\`make -jN'' does not work!)" + @echo "====================================================================" @echo @sleep 5 $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py From noreply at buildbot.pypy.org Fri Nov 1 18:44:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 18:44:17 +0100 (CET) Subject: [pypy-commit] pypy default: Clarify Message-ID: <20131101174417.9EA7B1C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67799:5a8ec99d6306 Date: 2013-11-01 18:43 +0100 http://bitbucket.org/pypy/pypy/changeset/5a8ec99d6306/ Log: Clarify diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -25,7 +25,7 @@ endif @echo @echo "If you have N cores, consider running \`\`MAKEFLAGS=-jN make'' to" - @echo "speed up the last part of translation (\`\`make -jN'' does not work!)" + @echo "speed up the last part (but \`\`make -jN'' does not work!)" @echo "====================================================================" @echo @sleep 5 From noreply at buildbot.pypy.org Fri Nov 1 19:08:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 19:08:31 +0100 (CET) Subject: [pypy-commit] pypy default: Pffff, I see no way to pass down the original MAKEFLAGS to Message-ID: <20131101180831.40CA91C10AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67800:ae161dea0ef2 Date: 2013-11-01 19:07 +0100 http://bitbucket.org/pypy/pypy/changeset/ae161dea0ef2/ Log: Pffff, I see no way to pass down the original MAKEFLAGS to the subprocess. This makes the "-jN" option unusable. As a result, increase the time estimate... 
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -21,11 +21,8 @@ else @echo "Building a regular (jitting) version of PyPy, using" @echo "$(PYPY_EXECUTABLE) to run the translation itself." - @echo "This takes around 45 minutes and $(URAM) GB of RAM." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." endif - @echo - @echo "If you have N cores, consider running \`\`MAKEFLAGS=-jN make'' to" - @echo "speed up the last part (but \`\`make -jN'' does not work!)" @echo "====================================================================" @echo @sleep 5 From noreply at buildbot.pypy.org Fri Nov 1 19:24:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 19:24:56 +0100 (CET) Subject: [pypy-commit] pypy default: Add a link to download.html, and mention the issue with -jN Message-ID: <20131101182456.44C541C10AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67801:481657b05e84 Date: 2013-11-01 19:24 +0100 http://bitbucket.org/pypy/pypy/changeset/481657b05e84/ Log: Add a link to download.html, and mention the issue with -jN diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -23,7 +23,16 @@ @echo "$(PYPY_EXECUTABLE) to run the translation itself." @echo "This takes up to 1 hour and $(URAM) GB of RAM." endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." @echo "====================================================================" @echo @sleep 5 $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. 
We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html From noreply at buildbot.pypy.org Fri Nov 1 19:28:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Nov 2013 19:28:25 +0100 (CET) Subject: [pypy-commit] pypy default: Add this file from the py3k branch, so that it appears as Message-ID: <20131101182825.129391C10AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67802:b96a176fed01 Date: 2013-11-01 19:27 +0100 http://bitbucket.org/pypy/pypy/changeset/b96a176fed01/ Log: Add this file from the py3k branch, so that it appears as http://doc.pypy.org/en/latest/release-pypy3-2.1.0-beta1.html diff --git a/pypy/doc/release-pypy3-2.1.0-beta1.rst b/pypy/doc/release-pypy3-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.1.0-beta1.rst @@ -0,0 +1,56 @@ +================ +PyPy3 2.1 beta 1 +================ + +We're pleased to announce the first beta of the upcoming 2.1 release of +PyPy3. This is the first release of PyPy which targets Python 3 (3.2.3) +compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this and future releases. + +You can download the PyPy3 2.1 beta 1 release here: + + http://pypy.org/download.html#pypy3-2-1-beta-1 + +Highlights +========== + +* The first release of PyPy3: support for Python 3, targetting CPython 3.2.3! + + - There are some `known issues`_ including performance regressions (issues + `#1540`_ & `#1541`_) slated to be resolved before the final release. + +What is PyPy? +============== + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3 or 3.2.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. 
Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling and we would welcome a volunteer to handle +that. + +How to use PyPy? +================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`known issues`: https://bugs.pypy.org/issue?%40search_text=&title=py3k&%40columns=title&keyword=&id=&%40columns=id&creation=&creator=&release=&activity=&%40columns=activity&%40sort=activity&actor=&priority=&%40group=priority&status=-1%2C1%2C2%2C3%2C4%2C5%2C6&%40columns=status&assignedto=&%40columns=assignedto&%40pagesize=50&%40startwith=0&%40queryname=&%40old-queryname=&%40action=search +.. _`#1540`: https://bugs.pypy.org/issue1540 +.. _`#1541`: https://bugs.pypy.org/issue1541 +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. 
_`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team From noreply at buildbot.pypy.org Sun Nov 3 08:47:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 08:47:58 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: hg merge default Message-ID: <20131103074758.336AA1C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67803:fb6de955d58e Date: 2013-11-03 08:15 +0100 http://bitbucket.org/pypy/pypy/changeset/fb6de955d58e/ Log: hg merge default diff too long, truncating to 2000 out of 18486 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." 
+ @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,9 +26,11 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest to use virtualenv with the resulting pypy-c as the interpreter, you can diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from . import core -from .core import * -from . import lib -from .lib import * - -from __builtin__ import bool, int, long, float, complex, object, unicode, str - -from .core import round, abs, max, min - -__version__ = '1.7.0' - -__all__ = ['__version__'] -__all__ += core.__all__ -__all__ += lib.__all__ - -#import sys -#sys.modules.setdefault('numpy', sys.modules['numpypy']) diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from . import multiarray -from . import umath -from . import numeric -from .numeric import * -from . 
import fromnumeric -from .fromnumeric import * -from . import shape_base -from .shape_base import * - -from .fromnumeric import amax as max, amin as min, \ - round_ as round -from .numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -import multiarray as mu -import umath as um -from numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(xrange(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, 
axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: - ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - rcount = _count_reduce_items(arr, axis) - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - arr = asanyarray(a) - - # First compute the mean, saving 'rcount' for reuse later - if dtype is None and arr.dtype.kind in ['b','u','i']: - arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True) - else: - arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True) - rcount = _count_reduce_items(arr, axis) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide(arrmean, rcount, - out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean / float(rcount) - - # arr - arrmean - x = arr - arrmean - - # (arr - arrmean) ** 2 - if arr.dtype.kind == 'c': - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - - # add.reduce((arr - arrmean) ** 2, axis) - ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof) - if not keepdims and isinstance(rcount, mu.ndarray): - rcount = rcount.squeeze(axis=axis) - rcount -= ddof - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - else: - ret = um.sqrt(ret) - - return ret diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/arrayprint.py +++ /dev/null @@ -1,751 
+0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", "get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -import numerictypes as _nt -from umath import maximum, minimum, absolute, not_equal, isnan, isinf -#from multiarray import format_longfloat, datetime_as_string, datetime_data -from fromnumeric import ravel - - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements - -if sys.version_info[0] >= 3: - from functools import reduce - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). 
- suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - See Also - -------- - get_printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) 
- >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... suppress=False, threshold=1000, formatter=None) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ - _formatter - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter - -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - For a full description of these options, see `set_printoptions`. 
- - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' - else: - return 'False' - - -def repr_format(x): - return repr(x) - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - formatdict = {'bool' : _boolFormatter, - 'int' : IntegerFormat(data), - 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : FloatFormat(data, precision, suppress_small), - 'complexfloat' : ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat' : ComplexFormat(data, precision, - suppress_small), - 'datetime' : DatetimeFormat(data), - 'timedelta' : TimedeltaFormat(data), - 'numpystr' : repr_format, - 'str' : str} - - if formatter is not None: - fkeys = [k for k in formatter.keys() if 
formatter[k] is not None] - if 'all' in fkeys: - for key in formatdict.keys(): - formatdict[key] = formatter['all'] - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = formatter['int_kind'] - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = formatter['float_kind'] - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = formatter['complex_kind'] - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = formatter['str_kind'] - for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = formatter[key] - - try: - format_function = a._format - msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \ - "will be removed in 2.1. Use the `formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - # find the right formatting function for the array - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - format_function = formatdict['bool'] - elif issubclass(dtypeobj, _nt.integer): - #if issubclass(dtypeobj, _nt.timedelta64): - # format_function = formatdict['timedelta'] - #else: - format_function = formatdict['int'] - elif issubclass(dtypeobj, _nt.floating): - if hasattr(_nt, 'longfloat') and issubclass(dtypeobj, _nt.longfloat): - format_function = formatdict['longfloat'] - else: - format_function = formatdict['float'] - elif issubclass(dtypeobj, _nt.complexfloating): - if hasattr(_nt, 'clongfloat') and issubclass(dtypeobj, _nt.clongfloat): - format_function = formatdict['longcomplexfloat'] - else: - format_function = formatdict['complexfloat'] - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - format_function = formatdict['numpystr'] - #elif issubclass(dtypeobj, _nt.datetime64): - # format_function = formatdict['datetime'] - else: - format_function = formatdict['str'] - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - 
lst = _formatArray(a, format_function, len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - return lst - -def _convert_arrays(obj): - import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError : if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - msg = "The `_format` attribute is deprecated in Numpy " \ - "2.0 and will be removed in 2.1. Use the " \ - "`formatter` kw instead." 
- import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix, formatter=formatter) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in xrange(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in xrange(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in xrange(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, 
separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in xrange(trailing_items, 1, -1): - if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = precision - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - self.max_str_len = 0 - try: - self.fillFormat(data) - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - - def fillFormat(self, data): - import numeric as _nc - errstate = _nc.seterr(all='ignore') - try: - special = isnan(data) | isinf(data) - valid = not_equal(data, 0) & ~special - non_zero = absolute(data.compress(valid)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. 
- else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): - self.exp_format = True - finally: - _nc.seterr(**errstate) - - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' - else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) - else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) - else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' - else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) - - self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format - - def __call__(self, x, strip_zeros=True): - import numeric as _nc - err = _nc.seterr(invalid='ignore') - try: - if isnan(x): - if self.sign: - return self.special_fmt % ('+' + _nan_str,) - else: - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - if self.sign: - return self.special_fmt % ('+' + _inf_str,) - else: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + _inf_str,) - finally: - _nc.seterr(**err) - - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s - - -def _digits(x, precision, format): - s = format % x - z = 
s.rstrip('0') - return precision - len(s) + len(z) - - -_MAXINT = sys.maxint -_MININT = -sys.maxint-1 -class IntegerFormat(object): - def __init__(self, data): - try: - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - self.format = '%' + str(max_str_len) + 'd' - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - except ValueError: - # this occurs when everything is NA - pass - - def __call__(self, x): - if _MININT < x < _MAXINT: - return self.format % x - else: - return "%s" % x - -class LongFloatFormat(object): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def __init__(self, precision, sign=False): - self.precision = precision - self.sign = sign - - def __call__(self, x): - if isnan(x): - if self.sign: - return '+' + _nan_str - else: - return ' ' + _nan_str - elif isinf(x): - if x > 0: - if self.sign: - return '+' + _inf_str - else: - return ' ' + _inf_str - else: - return '-' + _inf_str - elif x >= 0: - if self.sign: - return '+' + format_longfloat(x, self.precision) - else: - return ' ' + format_longfloat(x, self.precision) - else: - return format_longfloat(x, self.precision) - - -class LongComplexFormat(object): - def __init__(self, precision): - self.real_format = LongFloatFormat(precision) - self.imag_format = LongFloatFormat(precision, sign=True) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - return r + i + 'j' - - -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) - - def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not self.imag_format.exp_format: - z 
= i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) - else: - i = i + 'j' - return r + i - -class DatetimeFormat(object): - def __init__(self, x, unit=None, - timezone=None, casting='same_kind'): - # Get the unit from the dtype - if unit is None: - if x.dtype.kind == 'M': - unit = datetime_data(x.dtype)[0] - else: - unit = 's' - - # If timezone is default, make it 'local' or 'UTC' based on the unit - if timezone is None: - # Date units -> UTC, time units -> local - if unit in ('Y', 'M', 'W', 'D'): - self.timezone = 'UTC' - else: - self.timezone = 'local' - else: - self.timezone = timezone - self.unit = unit - self.casting = casting - - def __call__(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) - -class TimedeltaFormat(object): - def __init__(self, data): - if data.dtype.kind == 'm': - v = data.view('i8') - max_str_len = max(len(str(maximum.reduce(v))), - len(str(minimum.reduce(v)))) - self.format = '%' + str(max_str_len) + 'd' - - def __call__(self, x): - return self.format % x.astype('i8') - diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/fromnumeric.py +++ /dev/null @@ -1,2924 +0,0 @@ -###################################################################### -# This is a copy of numpy/core/fromnumeric.py modified for numpypy -###################################################################### -"""Module containing non-deprecated functions borrowed from Numeric. - -""" -from __future__ import division, absolute_import, print_function - -import types - -from . import multiarray as mu -from . import umath as um -from . import numerictypes as nt -from .numeric import asarray, array, asanyarray, concatenate -from . 
import _methods - - -# functions that are methods -__all__ = [ - 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', - 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', - 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', - 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', - 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', - 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', - ] - - -try: - _gentype = types.GeneratorType -except AttributeError: - _gentype = type(None) - -# save away Python sum -_sum_ = sum - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj), method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. - - Parameters - ---------- - a : array_like - The source array. - indices : array_like - The indices of the values to extract. - - .. versionadded:: 1.8.0 - - Also allow scalars for indices. - axis : int, optional - The axis over which to select values. By default, the flattened - input array is used. - out : ndarray, optional - If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. 
- - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - Returns - ------- - subarray : ndarray - The returned array has the same type as `a`. - - See Also - -------- - ndarray.take : equivalent method - - Examples - -------- - >>> a = [4, 3, 5, 7, 6, 8] - >>> indices = [0, 1, 4] - >>> np.take(a, indices) - array([4, 3, 6]) - - In this example if `a` is an ndarray, "fancy" indexing can be used. - - >>> a = np.array(a) - >>> a[indices] - array([4, 3, 6]) - - If `indices` is not one dimensional, the output also has these dimensions. - - >>> np.take(a, [[0, 1], [2, 3]]) - array([[4, 3], - [5, 7]]) - """ - try: - take = a.take - except AttributeError: - return _wrapit(a, 'take', indices, axis, out, mode) - return take(indices, axis, out, mode) - - -# not deprecated --- copy if necessary, view otherwise -def reshape(a, newshape, order='C'): - """ - Gives a new shape to an array without changing its data. - - Parameters - ---------- - a : array_like - Array to be reshaped. - newshape : int or tuple of ints - The new shape should be compatible with the original shape. If - an integer, then the result will be a 1-D array of that length. - One shape dimension can be -1. In this case, the value is inferred - from the length of the array and remaining dimensions. - order : {'C', 'F', 'A'}, optional - Read the elements of `a` using this index order, and place the elements - into the reshaped array using this index order. 'C' means to - read / write the elements using C-like index order, with the last axis index - changing fastest, back to the first axis index changing slowest. 'F' - means to read / write the elements using Fortran-like index order, with - the first index changing fastest, and the last index changing slowest. 
- Note that the 'C' and 'F' options take no account of the memory layout - of the underlying array, and only refer to the order of indexing. 'A' - means to read / write the elements in Fortran-like index order if `a` is - Fortran *contiguous* in memory, C-like order otherwise. - - Returns - ------- - reshaped_array : ndarray - This will be a new view object if possible; otherwise, it will - be a copy. Note there is no guarantee of the *memory layout* (C- or - Fortran- contiguous) of the returned array. - - See Also - -------- - ndarray.reshape : Equivalent method. - - Notes - ----- - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raise if the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - # A transpose make the array non-contiguous - >>> b = a.T - # Taking a view makes it possible to modify the shape without modifying the - # initial object. - >>> c = b.view() - >>> c.shape = (20) - AttributeError: incompatible shape for a non-contiguous array - - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. For example, - let's say you have an array: - - >>> a = np.arange(6).reshape((3, 2)) - >>> a - array([[0, 1], - [2, 3], - [4, 5]]) - - You can think of reshaping as first raveling the array (using the given - index order), then inserting the elements from the raveled array into the - new array using the same kind of index ordering as was used for the - raveling. 
- - >>> np.reshape(a, (2, 3)) # C-like index ordering - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering - array([[0, 4, 3], - [2, 1, 5]]) - >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') - array([[0, 4, 3], - [2, 1, 5]]) - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - """ - assert order == 'C' - try: - reshape = a.reshape - except AttributeError: - return _wrapit(a, 'reshape', newshape) - return reshape(newshape) - - -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties. Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. 
Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. 
- - See Also - -------- - ndarray.choose : equivalent method - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... ) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - try: - choose = a.choose - except AttributeError: - return _wrapit(a, 'choose', choices, out=out, mode=mode) - return choose(choices, out=out, mode=mode) - - -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. 
- repeats : {int, array of ints} - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. - axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. - - Examples - -------- - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - try: - repeat = a.repeat - except AttributeError: - return _wrapit(a, 'repeat', repeats, axis) - return repeat(repeats, axis) - - -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. 
- - See Also - -------- - putmask, place - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - return a.put(ind, v, mode) - - -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. - axis2 : int - Second axis. - - Returns - ------- - a_swapped : ndarray - If `a` is an ndarray, then a view of `a` is returned; otherwise - a new array is created. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) - - >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) - >>> x - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - - >>> np.swapaxes(x,0,2) - array([[[0, 4], - [2, 6]], - [[1, 5], - [3, 7]]]) - - """ - try: - swapaxes = a.swapaxes - except AttributeError: - return _wrapit(a, 'swapaxes', axis1, axis2) - return swapaxes(axis1, axis2) - - -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. - axes : list of ints, optional - By default, reverse the dimensions, otherwise permute the axes - according to the values given. - - Returns - ------- - p : ndarray - `a` with its axes permuted. A view is returned whenever - possible. 
- - See Also - -------- - rollaxis - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.transpose(x) - array([[0, 2], - [1, 3]]) - - >>> x = np.ones((1, 2, 3)) - >>> np.transpose(x, (1, 0, 2)).shape - (2, 1, 3) - - """ - if axes is not None: - raise NotImplementedError('No "axes" arg yet.') - try: - transpose = a.transpose - except AttributeError: - return _wrapit(a, 'transpose') - return transpose() - - -def partition(a, kth, axis=-1, kind='introselect', order=None): - """ - Return a partitioned copy of an array. - - Creates a copy of the array with its elements rearranged in such a way that - the value of the element in kth position is in the position it would be in - a sorted array. All elements smaller than the kth element are moved before - this element and all equal or greater are moved behind it. The ordering of - the elements in the two partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to be sorted. - kth : int or sequence of ints - Element index to partition by. The kth value of the element will be in - its final sorted position and all smaller elements will be moved before - it and all equal or greater elements behind it. - The order all elements in the partitions is undefined. - If provided with a sequence of kth it will partition all elements - indexed by kth of them into their sorted position at once. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. - - Returns - ------- - partitioned_array : ndarray - Array of the same type and shape as `a`. 
- - See Also - -------- - ndarray.partition : Method to sort an array in-place. - argpartition : Indirect partition. - sort : Full sorting - - Notes - ----- - The various selection algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative order. The - three available algorithms have the following properties: - - ================= ======= ============= ============ ======= - kind speed worst case work space stable - ================= ======= ============= ============ ======= - 'introselect' 1 O(n) 0 no - ================= ======= ============= ============ ======= - - All the partition algorithms make temporary copies of the data when - partitioning along any but the last axis. Consequently, partitioning - along the last axis is faster and uses less space than partitioning - along any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> np.partition(a, 3) - array([2, 1, 3, 4]) - - >>> np.partition(a, (1, 3)) - array([1, 2, 3, 4]) - - """ - if axis is None: - a = asanyarray(a).flatten() - axis = 0 - else: - a = asanyarray(a).copy() - a.partition(kth, axis=axis, kind=kind, order=order) - return a - - -def argpartition(a, kth, axis=-1, kind='introselect', order=None): - """ - Perform an indirect partition along the given axis using the algorithm - specified by the `kind` keyword. It returns an array of indices of the - same shape as `a` that index data along the given axis in partitioned - order. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to sort. - kth : int or sequence of ints - Element index to partition by. 
The kth element will be in its final - sorted position and all smaller elements will be moved before it and - all larger elements behind it. - The order all elements in the partitions is undefined. - If provided with a sequence of kth it will partition all of them into - their sorted position at once. - axis : int or None, optional - Axis along which to sort. The default is -1 (the last axis). If None, - the flattened array is used. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect' - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - Returns - ------- - index_array : ndarray, int - Array of indices that partition `a` along the specified axis. - In other words, ``a[index_array]`` yields a sorted `a`. - - See Also - -------- - partition : Describes partition algorithms used. - ndarray.partition : Inplace partition. - argsort : Full indirect sort - - Notes - ----- - See `partition` for notes on the different selection algorithms. - - Examples - -------- - One dimensional array: - - >>> x = np.array([3, 4, 2, 1]) - >>> x[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - >>> x[np.argpartition(x, (1, 3))] - array([1, 2, 3, 4]) - - """ - return a.argpartition(kth, axis, kind=kind, order=order) - - -def sort(a, axis=-1, kind='quicksort', order=None): - """ - Return a sorted copy of an array. - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. 
- - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - partition : Partial sort. - - Notes - ----- - The various sorting algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following - properties: - - =========== ======= ============= ============ ======= - kind speed worst case work space stable - =========== ======= ============= ============ ======= - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no - =========== ======= ============= ============ ======= - - All the sort algorithms make temporary copies of the data when - sorting along any but the last axis. Consequently, sorting along - the last axis is faster and uses less space than sorting along - any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Previous to numpy 1.4.0 sorting real and complex arrays containing nan - values led to undefined behaviour. In numpy versions >= 1.4.0 nan - values are sorted to the end. The extended sort order is: - - * Real: [R, nan] - * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] - - where R is a non-nan real value. Complex values with the same nan - placements are sorted according to the non-nan part if it exists. - Non-nan values are sorted as before. 
- - Examples - -------- - >>> a = np.array([[1,4],[3,1]]) - >>> np.sort(a) # sort along the last axis - array([[1, 4], - [1, 3]]) - >>> np.sort(a, axis=None) # sort the flattened array - array([1, 1, 3, 4]) - >>> np.sort(a, axis=0) # sort along the first axis - array([[1, 1], - [3, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] - >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), - ... ('Galahad', 1.7, 38)] - >>> a = np.array(values, dtype=dtype) # create a structured array - >>> np.sort(a, order='height') # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.8999999999999999, 38)], - dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), - ('Arthur', 1.8, 41)], - dtype=[('name', '|S10'), ('height', '>> x = np.array([3, 1, 2]) - >>> np.argsort(x) - array([1, 2, 0]) - - Two-dimensional array: - - >>> x = np.array([[0, 3], [2, 2]]) - >>> x - array([[0, 3], - [2, 2]]) - - >>> np.argsort(x, axis=0) - array([[0, 1], - [1, 0]]) - - >>> np.argsort(x, axis=1) - array([[0, 1], - [0, 1]]) - - Sorting with keys: - - >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> x - array([(1, 0), (0, 1)], - dtype=[('x', '>> np.argsort(x, order=('x','y')) - array([1, 0]) - - >>> np.argsort(x, order=('y','x')) - array([0, 1]) - - """ - try: - argsort = a.argsort - except AttributeError: - return _wrapit(a, 'argsort', axis, kind, order) - return argsort(axis, kind, order) - - -def argmax(a, axis=None): - """ - Indices of the maximum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. 
It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmax, argmin - amax : The maximum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - - Notes - ----- - In case of multiple occurrences of the maximum values, the indices - corresponding to the first occurrence are returned. - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argmax(a) - 5 - >>> np.argmax(a, axis=0) - array([1, 1, 1]) - >>> np.argmax(a, axis=1) - array([2, 2]) - - >>> b = np.arange(6) - >>> b[1] = 5 - >>> b - array([0, 5, 2, 3, 4, 5]) - >>> np.argmax(b) # Only the first occurrence is returned. - 1 - - """ - assert axis is None - try: - argmax = a.argmax - except AttributeError: - return _wrapit(a, 'argmax') - return argmax() - - -def argmin(a, axis=None): - """ - Return the indices of the minimum values along an axis. - - See Also - -------- - argmax : Similar function. Please refer to `numpy.argmax` for detailed - documentation. - - """ - assert axis is None - try: - argmin = a.argmin - except AttributeError: - return _wrapit(a, 'argmin') - return argmin() - - -def searchsorted(a, v, side='left', sorter=None): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `a` such that, if the - corresponding elements in `v` were inserted before the indices, the - order of `a` would be preserved. - - Parameters - ---------- - a : 1-D array_like - Input array. If `sorter` is None, then it must be sorted in - ascending order, otherwise `sorter` must be an array of indices - that sort it. - v : array_like - Values to insert into `a`. - side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. - If 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `a`). 
- sorter : 1-D array_like, optional - .. versionadded:: 1.7.0 - Optional array of integer indices that sort array a into ascending - order. They are typically the result of argsort. - - Returns - ------- - indices : array of ints - Array of insertion points with the same shape as `v`. - - See Also - -------- - sort : Return a sorted copy of an array. - histogram : Produce histogram from 1-D data. - - Notes - ----- - Binary search is used to find the required insertion points. - - As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing - `nan` values. The enhanced sort order is documented in `sort`. - - Examples - -------- - >>> np.searchsorted([1,2,3,4,5], 3) - 2 - >>> np.searchsorted([1,2,3,4,5], 3, side='right') - 3 - >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) - array([0, 5, 1, 2]) - - """ - try: From noreply at buildbot.pypy.org Sun Nov 3 08:47:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 08:47:59 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Use GcArray(overallocated=True) in rlist.py. Message-ID: <20131103074759.6E5091C01DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67804:30af5046d2b3 Date: 2013-11-03 08:47 +0100 http://bitbucket.org/pypy/pypy/changeset/30af5046d2b3/ Log: Use GcArray(overallocated=True) in rlist.py. 
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -200,13 +200,15 @@ # supports non-overlapping copies only if not we_are_translated(): - if source == dest: + if lltype.typeOf(source) == lltype.typeOf(dest) and source == dest: assert (source_start + length <= dest_start or dest_start + length <= source_start) - TP = lltype.typeOf(source).TO - assert TP == lltype.typeOf(dest).TO - if _contains_gcptr(TP.OF): + # supports copying between an overallocated GcArray and a regular GcArray + TP_SRC = lltype.typeOf(source).TO + TP_DST = lltype.typeOf(dest).TO + assert TP_SRC.OF == TP_DST.OF + if _contains_gcptr(TP_SRC.OF): # perform a write barrier that copies necessary flags from # source to dest if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest, @@ -220,13 +222,13 @@ return source_addr = llmemory.cast_ptr_to_adr(source) dest_addr = llmemory.cast_ptr_to_adr(dest) - cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) + - llmemory.sizeof(TP.OF) * source_start) - cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) + - llmemory.sizeof(TP.OF) * dest_start) + cp_source_addr = (source_addr + llmemory.itemoffsetof(TP_SRC, 0) + + llmemory.sizeof(TP_SRC.OF) * source_start) + cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP_DST, 0) + + llmemory.sizeof(TP_DST.OF) * dest_start) llmemory.raw_memcopy(cp_source_addr, cp_dest_addr, - llmemory.sizeof(TP.OF) * length) + llmemory.sizeof(TP_SRC.OF) * length) keepalive_until_here(source) keepalive_until_here(dest) diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py --- a/rpython/rtyper/lltypesystem/rlist.py +++ b/rpython/rtyper/lltypesystem/rlist.py @@ -54,8 +54,11 @@ else: raise NotImplementedError(variant) - def get_itemarray_lowleveltype(self): + def get_itemarray_lowleveltype(self, overallocated): ITEM = self.item_repr.lowleveltype + hints = {} + if overallocated: + hints['overallocated'] = True ITEMARRAY = 
GcArray(ITEM, adtmeths = ADTIFixedList({ "ll_newlist": ll_fixed_newlist, @@ -65,7 +68,8 @@ "ITEM": ITEM, "ll_getitem_fast": ll_fixed_getitem_fast, "ll_setitem_fast": ll_fixed_setitem_fast, - })) + }), + hints = hints) return ITEMARRAY @@ -86,10 +90,9 @@ self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer()) if isinstance(self.LIST, GcForwardReference): ITEM = self.item_repr.lowleveltype - ITEMARRAY = self.get_itemarray_lowleveltype() + ITEMARRAY = self.get_itemarray_lowleveltype(True) # XXX we might think of turning length stuff into Unsigned - self.LIST.become(GcStruct("list", ("length", Signed), - ("items", Ptr(ITEMARRAY)), + self.LIST.become(GcStruct("list", ("items", Ptr(ITEMARRAY)), adtmeths = ADTIList({ "ll_newlist": ll_newlist, "ll_newlist_hint": ll_newlist_hint, @@ -112,8 +115,8 @@ def prepare_const(self, n): result = malloc(self.LIST, immortal=True) - result.length = n result.items = malloc(self.LIST.items.TO, n) + result.items.used_length = n return result @@ -123,7 +126,7 @@ if 'item_repr' not in self.__dict__: self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer()) if isinstance(self.LIST, GcForwardReference): - ITEMARRAY = self.get_itemarray_lowleveltype() + ITEMARRAY = self.get_itemarray_lowleveltype(overallocated=False) self.LIST.become(ITEMARRAY) def compact_repr(self): @@ -142,12 +145,14 @@ # adapted C code - at jit.look_inside_iff(lambda l, newsize, overallocate: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) + at jit.look_inside_iff(lambda l, newsize, overallocate: + jit.isconstant(l.items.allocated_length) and + jit.isconstant(newsize)) @signature(types.any(), types.int(), types.bool(), returns=types.none()) def _ll_list_resize_hint_really(l, newsize, overallocate): """ Ensure l.items has room for at least newsize elements. 
Note that - l.items may change, and even if newsize is less than l.length on + l.items may change, and even if newsize is less than used_length on entry. """ # This over-allocates proportional to the list size, making room @@ -158,7 +163,6 @@ # The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ... if newsize <= 0: ll_assert(newsize == 0, "negative list length") - l.length = 0 l.items = _ll_new_empty_item_array(typeOf(l).TO) return elif overallocate: @@ -174,28 +178,28 @@ # linear complexity for e.g. repeated usage of l.append(). In case # it overflows sys.maxint, it is guaranteed negative, and the following # malloc() will fail. + newitems = malloc(typeOf(l).TO.items.TO, new_allocated) items = l.items - newitems = malloc(typeOf(l).TO.items.TO, new_allocated) - before_len = l.length + before_len = items.used_length if before_len: # avoids copying GC flags from the prebuilt_empty_array - if before_len < newsize: - p = before_len - else: - p = newsize + p = min(before_len, newsize) + newitems.used_length = p rgc.ll_arraycopy(items, newitems, 0, 0, p) l.items = newitems - at jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) + at jit.look_inside_iff(lambda l, newsize: + jit.isconstant(l.items.allocated_length) and + jit.isconstant(newsize)) def _ll_list_resize_hint(l, newsize): """Ensure l.items has room for at least newsize elements without - setting l.length to newsize. + setting used_length to newsize. Used before (and after) a batch operation that will likely grow the list to the newsize (and after the operation incase the initial guess lied). """ assert newsize >= 0, "negative list length" - allocated = len(l.items) + allocated = l.items.allocated_length if newsize > allocated: overallocate = True elif newsize < (allocated >> 1) - 5: @@ -208,11 +212,11 @@ def _ll_list_resize_really(l, newsize, overallocate): """ Ensure l.items has room for at least newsize elements, and set - l.length to newsize. 
Note that l.items may change, and even if - newsize is less than l.length on entry. + used_length to newsize. Note that l.items may change, and even if + newsize is less than used_length on entry. """ _ll_list_resize_hint_really(l, newsize, overallocate) - l.length = newsize + l.items.used_length = newsize # this common case was factored out of _ll_list_resize # to see if inlining it gives some speed-up. @@ -230,34 +234,30 @@ a realloc(). In the common case where we already overallocated enough, then this is a very fast operation. """ - cond = len(l.items) < newsize - if jit.isconstant(len(l.items)) and jit.isconstant(newsize): + allocated = l.items.allocated_length + cond = allocated < newsize + if jit.isconstant(allocated) and jit.isconstant(newsize): if cond: _ll_list_resize_hint_really(l, newsize, True) else: jit.conditional_call(cond, _ll_list_resize_hint_really, l, newsize, True) - l.length = newsize + l.items.used_length = newsize def _ll_list_resize_le(l, newsize): """This is called with 'newsize' smaller than the current length of the list. If 'newsize' falls lower than half the allocated size, proceed with the realloc() to shrink the list. 
""" - cond = newsize < (len(l.items) >> 1) - 5 + cond = newsize < (l.items.allocated_length >> 1) - 5 # note: overallocate=False should be safe here - if jit.isconstant(len(l.items)) and jit.isconstant(newsize): + if jit.isconstant(l.items.allocated_length) and jit.isconstant(newsize): if cond: _ll_list_resize_hint_really(l, newsize, False) else: jit.conditional_call(cond, _ll_list_resize_hint_really, l, newsize, False) - l.length = newsize - -def ll_append_noresize(l, newitem): - length = l.length - l.length = length + 1 - l.ll_setitem_fast(length, newitem) + l.items.used_length = newsize def ll_both_none(lst1, lst2): @@ -271,8 +271,9 @@ def ll_newlist(LIST, length): ll_assert(length >= 0, "negative list length") l = malloc(LIST) - l.length = length - l.items = malloc(LIST.items.TO, length) + items = malloc(LIST.items.TO, length) + items.used_length = length + l.items = items return l ll_newlist = typeMethod(ll_newlist) ll_newlist.oopspec = 'newlist(length)' @@ -280,7 +281,6 @@ def ll_newlist_hint(LIST, lengthhint): ll_assert(lengthhint >= 0, "negative list length") l = malloc(LIST) - l.length = 0 l.items = malloc(LIST.items.TO, lengthhint) return l ll_newlist_hint = typeMethod(ll_newlist_hint) @@ -292,7 +292,7 @@ INITIAL_EMPTY_LIST_ALLOCATION = 0 def _ll_prebuilt_empty_array(LISTITEM): - return malloc(LISTITEM, 0) + return malloc(LISTITEM, 0) # memo! 
_ll_prebuilt_empty_array._annspecialcase_ = 'specialize:memo' def _ll_new_empty_item_array(LIST): @@ -303,26 +303,25 @@ def ll_newemptylist(LIST): l = malloc(LIST) - l.length = 0 l.items = _ll_new_empty_item_array(LIST) return l ll_newemptylist = typeMethod(ll_newemptylist) ll_newemptylist.oopspec = 'newlist(0)' def ll_length(l): - return l.length + return l.items.used_length ll_length.oopspec = 'list.len(l)' def ll_items(l): return l.items def ll_getitem_fast(l, index): - ll_assert(index < l.length, "getitem out of bounds") + ll_assert(index < l.ll_length(), "getitem out of bounds") return l.ll_items()[index] ll_getitem_fast.oopspec = 'list.getitem(l, index)' def ll_setitem_fast(l, index, item): - ll_assert(index < l.length, "setitem out of bounds") + ll_assert(index < l.ll_length(), "setitem out of bounds") l.ll_items()[index] = item ll_setitem_fast.oopspec = 'list.setitem(l, index, item)' @@ -405,7 +404,7 @@ index = iter.index if index >= l.ll_length(): raise StopIteration - iter.index = index + 1 # cannot overflow because index < l.length + iter.index = index + 1 # cannot overflow because index < used_length return l.ll_getitem_fast(index) def ll_listnext_foldable(iter): @@ -414,7 +413,7 @@ index = iter.index if index >= l.ll_length(): raise StopIteration - iter.index = index + 1 # cannot overflow because index < l.length + iter.index = index + 1 # cannot overflow because index < used_length return ll_getitem_foldable_nonneg(l, index) def ll_getnextindex(iter): From noreply at buildbot.pypy.org Sun Nov 3 09:16:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 09:16:48 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: One point of this refactoring is to allow list deletions to work without Message-ID: <20131103081648.B1AC21C01F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67805:c70e3a202baf Date: 2013-11-03 09:15 +0100 
http://bitbucket.org/pypy/pypy/changeset/c70e3a202baf/ Log: One point of this refactoring is to allow list deletions to work without replacing the items with NULL. diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -8,7 +8,7 @@ from rpython.rtyper.annlowlevel import ADTInterface from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool -from rpython.rtyper.lltypesystem.lltype import nullptr, Char, UniChar, Number +from rpython.rtyper.lltypesystem.lltype import Char, UniChar, Number from rpython.rtyper.rmodel import Repr, IteratorRepr, IntegerRepr from rpython.rtyper.rstr import AbstractStringRepr, AbstractCharRepr from rpython.tool.pairtype import pairtype, pair @@ -495,15 +495,6 @@ return LIST.ll_newlist(count) -# return a nullptr() if lst is a list of pointers it, else None. -def ll_null_item(lst): - LIST = typeOf(lst) - if isinstance(LIST, Ptr): - ITEM = LIST.TO.ITEM - if isinstance(ITEM, Ptr): - return nullptr(ITEM.TO) - return None - def listItemType(lst): LIST = typeOf(lst) return LIST.TO.ITEM @@ -603,9 +594,6 @@ index = length - 1 newlength = index res = l.ll_getitem_fast(index) - null = ll_null_item(l) - if null is not None: - l.ll_setitem_fast(index, null) l._ll_resize_le(newlength) return res @@ -622,9 +610,6 @@ l.ll_setitem_fast(j, l.ll_getitem_fast(j1)) j = j1 j1 += 1 - null = ll_null_item(l) - if null is not None: - l.ll_setitem_fast(newlength, null) l._ll_resize_le(newlength) return res ll_pop_zero.oopspec = 'list.pop(l, 0)' @@ -732,10 +717,6 @@ l.ll_setitem_fast(j, l.ll_getitem_fast(j1)) j = j1 j1 += 1 - - null = ll_null_item(l) - if null is not None: - l.ll_setitem_fast(newlength, null) l._ll_resize_le(newlength) ll_delitem_nonneg.oopspec = 'list.delitem(l, index)' @@ -890,12 +871,6 @@ ll_assert(start >= 0, "del l[start:] with unexpectedly negative start") ll_assert(start <= l.ll_length(), "del l[start:] with 
start > len(l)") newlength = start - null = ll_null_item(l) - if null is not None: - j = l.ll_length() - 1 - while j >= newlength: - l.ll_setitem_fast(j, null) - j -= 1 l._ll_resize_le(newlength) def ll_listdelslice_startstop(l, start, stop): @@ -912,12 +887,6 @@ l.ll_setitem_fast(j, l.ll_getitem_fast(i)) i += 1 j += 1 - null = ll_null_item(l) - if null is not None: - j = length - 1 - while j >= newlength: - l.ll_setitem_fast(j, null) - j -= 1 l._ll_resize_le(newlength) ll_listdelslice_startstop.oopspec = 'list.delslice_startstop(l, start, stop)' From noreply at buildbot.pypy.org Sun Nov 3 09:16:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 09:16:50 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Fix for refcounting. Deleted items are kept alive with refcounting, but too bad. Message-ID: <20131103081650.0C6721C01F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67806:e5790b2e3315 Date: 2013-11-03 09:16 +0100 http://bitbucket.org/pypy/pypy/changeset/e5790b2e3315/ Log: Fix for refcounting. Deleted items are kept alive with refcounting, but too bad. diff --git a/rpython/memory/gctransform/support.py b/rpython/memory/gctransform/support.py --- a/rpython/memory/gctransform/support.py +++ b/rpython/memory/gctransform/support.py @@ -34,8 +34,15 @@ if isinstance(TYPE, lltype.Array): inner = list(_static_deallocator_body_for_type('v_%i'%depth, TYPE.OF, depth+1)) if inner: + # NB. in case of overallocated array, we still decref all items + # rather than just the used ones. This is because the unused + # items still have a reference. It's not really nice, but we + # don't really care about the refcounting GC in the first place... 
yield ' '*depth + 'i_%d = 0'%(depth,) - yield ' '*depth + 'l_%d = len(%s)'%(depth, v) + if TYPE._is_overallocated_array(): + yield ' '*depth + 'l_%d = %s.allocated_length'%(depth, v) + else: + yield ' '*depth + 'l_%d = len(%s)'%(depth, v) yield ' '*depth + 'while i_%d < l_%d:'%(depth, depth) yield ' '*depth + ' v_%d = %s[i_%d]'%(depth, v, depth) for line in inner: From noreply at buildbot.pypy.org Sun Nov 3 09:37:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 09:37:06 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Be more eager in overallocating lists (but not extra eager in Message-ID: <20131103083706.51EF51C11BD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67807:c5de838f9d3b Date: 2013-11-03 09:36 +0100 http://bitbucket.org/pypy/pypy/changeset/c5de838f9d3b/ Log: Be more eager in overallocating lists (but not extra eager in this checkin; need to measure...) diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py --- a/rpython/rtyper/lltypesystem/rlist.py +++ b/rpython/rtyper/lltypesystem/rlist.py @@ -156,22 +156,34 @@ entry. """ # This over-allocates proportional to the list size, making room - # for additional growth. The over-allocation is mild, but is - # enough to give linear-time amortized behavior over a long - # sequence of appends() in the presence of a poorly-performing - # system malloc(). - # The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ... + # for additional growth. The over-allocation is eager for small + # lists, and mild for large ones (but enough to give linear-time + # amortized behavior over a long sequence of appends()). + # + # The idea is that small lists exist in the nursery; if they + # survive, they will be copied out of it by the GC, which will + # reduce their allocated_length down to their used_length. 
+ # + # The growth pattern is: + # 0, 8, 16, 32, (doubling region, adding 'newsize') + # 48, 72, 108, (adding 'newsize >> 1') + # 135, 168, 210, (adding 'newsize >> 2') + # 236, ... (adding 'newsize >> 3' from now on) if newsize <= 0: ll_assert(newsize == 0, "negative list length") l.items = _ll_new_empty_item_array(typeOf(l).TO) return elif overallocate: - if newsize < 9: - some = 3 + if newsize <= 4: + new_allocated = 8 + elif newsize < 32: + new_allocated = newsize + newsize + elif newsize < 128: + new_allocated = newsize + (newsize >> 1) + elif newsize < 224: + new_allocated = newsize + (newsize >> 2) else: - some = 6 - some += newsize >> 3 - new_allocated = newsize + some + new_allocated = newsize + (newsize >> 3) else: new_allocated = newsize # new_allocated is a bit more than newsize, enough to ensure an amortized From noreply at buildbot.pypy.org Sun Nov 3 10:49:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 10:49:23 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Test and fix Message-ID: <20131103094923.9135E1C01DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67808:2be7515d790c Date: 2013-11-03 10:48 +0100 http://bitbucket.org/pypy/pypy/changeset/2be7515d790c/ Log: Test and fix diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -847,7 +847,7 @@ self.fixed_list[p] = item else: self.fallback_list.append(item) - append._always_inline_ = True + append._always_inline_ = "force" def fence_exact(self): if self.optimize: diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -337,6 +337,18 @@ res = interp.eval_graph(graph, [10]) assert res == 5 + def test_simple_except(self): + def main(n): + try: + lst = [chr(x) for x in 
range(n)] + except: + return -1 + lst[0] = "foobar" + return ord(lst[5][0]) + interp, graph = self.specialize(main, [int]) + res = interp.eval_graph(graph, [10]) + assert res == 5 + def test_mutated_after_listcomp(self): def main(n): lst = [x*17 for x in range(n)] From noreply at buildbot.pypy.org Sun Nov 3 10:53:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 10:53:42 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Comment Message-ID: <20131103095342.3A5A81C01F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67809:8ec658318fe2 Date: 2013-11-03 10:53 +0100 http://bitbucket.org/pypy/pypy/changeset/8ec658318fe2/ Log: Comment diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -340,6 +340,8 @@ def test_simple_except(self): def main(n): try: + # char -> string conversion inside the append() method, + # forced because we later put a string inside lst lst = [chr(x) for x in range(n)] except: return -1 From noreply at buildbot.pypy.org Sun Nov 3 13:09:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 13:09:46 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Test and fix Message-ID: <20131103120946.327361C01F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67811:e83aae23c574 Date: 2013-11-03 12:57 +0100 http://bitbucket.org/pypy/pypy/changeset/e83aae23c574/ Log: Test and fix diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1495,8 +1495,8 @@ # Get the number of card marker bytes in the header. 
typeid = self.get_type_id(obj) offset_to_length = self.varsize_offset_to_length(typeid) - length = (obj + offset_to_length).signed[0] - bytes = self.card_marking_bytes_for_length(length) + allocated_length = (obj + offset_to_length).signed[0] + bytes = self.card_marking_bytes_for_length(allocated_length) p = llarena.getfakearenaaddress(obj - size_gc_header) # # If the object doesn't have GCFLAG_TRACK_YOUNG_PTRS, then it @@ -1514,6 +1514,9 @@ else: # Walk the bytes encoding the card marker bits, and for # each bit set, call trace_and_drag_out_of_nursery_partial(). + offset_to_used_length = self.varsize_offset_to_used_length( + typeid) + used_length = (obj + offset_to_used_length).signed[0] interval_start = 0 while bytes > 0: p -= 1 @@ -1526,10 +1529,10 @@ interval_stop = interval_start + self.card_page_indices # if cardbyte & 1: - if interval_stop > length: - interval_stop = length - ll_assert(cardbyte <= 1 and bytes == 0, - "premature end of object") + if interval_stop > used_length: + interval_stop = used_length + if interval_stop <= interval_start: + break self.trace_and_drag_out_of_nursery_partial( obj, interval_start, interval_stop) # diff --git a/rpython/memory/test/snippet.py b/rpython/memory/test/snippet.py --- a/rpython/memory/test/snippet.py +++ b/rpython/memory/test/snippet.py @@ -161,6 +161,46 @@ res = self.run('from_objwithfinalizer_to_youngobj') assert res == 1 + def define_overallocated_items_not_kept_alive(cls): + from rpython.rtyper.annlowlevel import cast_instance_to_gcref + from rpython.rtyper.lltypesystem import llmemory + class B: + count = 0 + class A: + def __del__(self): + self.b.count += 1 + ARRAY1 = lltype.GcArray(llmemory.GCREF, hints={'overallocated': True}) + #ARRAY2 = lltype.GcArray(('a', llmemory.GCREF), ('n', lltype.Signed), + # hints={'overallocated': True}) + + def make(b): + a1 = lltype.malloc(ARRAY1, 10) + #a2 = lltype.malloc(ARRAY2, 10) + a1.used_length = 10 + #a2.used_length = 10 + i = 0 + while i < 10: + a = A() + a.b = b + 
a1[i] = cast_instance_to_gcref(a) + #a2[i].a = cast_instance_to_gcref(a) + i += 1 + return a1 + + def f(): + b = B() + a1 = make(b) + a1.used_length = 0 + #a2.used_length = 0 + llop.gc__collect(lltype.Void) + return b.count + return f + + def test_overallocated_items_not_kept_alive(self): + res = self.run('overallocated_items_not_kept_alive') + assert res == 10 + + class SemiSpaceGCTests(SemiSpaceGCTestDefines): # xxx messy @@ -172,5 +212,8 @@ elif name == 'from_objwithfinalizer_to_youngobj': func = self.define_from_objwithfinalizer_to_youngobj() return self.interpret(func, []) + elif name == 'overallocated_items_not_kept_alive': + func = self.define_overallocated_items_not_kept_alive() + return self.interpret(func, []) else: assert 0, "don't know what to do with that" From noreply at buildbot.pypy.org Sun Nov 3 13:09:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 13:09:44 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Kill llmemory.gcarrayofptr_xxx. Found out that we can write the Message-ID: <20131103120944.EB9731C01DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67810:e47b0be7335b Date: 2013-11-03 12:30 +0100 http://bitbucket.org/pypy/pypy/changeset/e47b0be7335b/ Log: Kill llmemory.gcarrayofptr_xxx. Found out that we can write the special case in gc/base.py without using them, but using only the size of a GCREF as a special constant. diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -217,12 +217,13 @@ # which we have a special case for performance, or we call # the slow path version. 
if self.is_gcarrayofgcptr(typeid): - length = (obj + llmemory.gcarrayofptr_lengthoffset).signed[0] - item = obj + llmemory.gcarrayofptr_itemsoffset + item = obj + self.varsize_offset_to_variable_part(typeid) + length_adr = (obj + self.varsize_offset_to_used_length(typeid)) + length = length_adr.signed[0] while length > 0: if self.points_to_valid_gc_object(item): callback(item, arg) - item += llmemory.gcarrayofptr_singleitemoffset + item += llmemory.size_of_gcref length -= 1 return self._trace_slow_path(obj, callback, arg) @@ -273,12 +274,12 @@ typeid = self.get_type_id(obj) if self.is_gcarrayofgcptr(typeid): # a performance shortcut for GcArray(gcptr) - item = obj + llmemory.gcarrayofptr_itemsoffset - item += llmemory.gcarrayofptr_singleitemoffset * start + item = obj + self.varsize_offset_to_variable_part(typeid) + item += llmemory.size_of_gcref * start while length > 0: if self.points_to_valid_gc_object(item): callback(item, arg) - item += llmemory.gcarrayofptr_singleitemoffset + item += llmemory.size_of_gcref length -= 1 return ll_assert(self.has_gcptr_in_varsize(typeid), diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -246,10 +246,10 @@ else: assert isinstance(TYPE, lltype.GcArray) ARRAY = TYPE + if (isinstance(ARRAY.OF, lltype.Ptr) + and ARRAY.OF.TO._gckind == 'gc'): + infobits |= T_IS_GCARRAY_OF_GCPTR if not ARRAY._is_overallocated_array(): - if (isinstance(ARRAY.OF, lltype.Ptr) - and ARRAY.OF.TO._gckind == 'gc'): - infobits |= T_IS_GCARRAY_OF_GCPTR varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY) varinfo.ofstousedlength = varinfo.ofstolength else: diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -594,19 +594,11 @@ # GCREF is similar to Address but it is GC-aware GCREF = 
lltype.Ptr(lltype.GcOpaqueType('GCREF')) -# A placeholder for any type that is a GcArray of pointers. -# This can be used in the symbolic offsets above to access such arrays -# in a generic way. -GCARRAY_OF_PTR = lltype.GcArray(GCREF, hints={'placeholder': True}) -gcarrayofptr_lengthoffset = ArrayLengthOffset(GCARRAY_OF_PTR) -gcarrayofptr_itemsoffset = ArrayItemsOffset(GCARRAY_OF_PTR) -gcarrayofptr_singleitemoffset = ItemOffset(GCARRAY_OF_PTR.OF) +size_of_gcref = ItemOffset(GCREF) + def array_type_match(A1, A2): - return A1 == A2 or (A2 == GCARRAY_OF_PTR and - isinstance(A1, lltype.GcArray) and - isinstance(A1.OF, lltype.Ptr) and - not A1._hints.get('nolength') and - not A1._hints.get('overallocated')) + return A1 == A2 + def array_item_type_match(T1, T2): return T1 == T2 or (T2 == GCREF and isinstance(T1, lltype.Ptr)) diff --git a/rpython/rtyper/lltypesystem/test/test_llmemory.py b/rpython/rtyper/lltypesystem/test/test_llmemory.py --- a/rpython/rtyper/lltypesystem/test/test_llmemory.py +++ b/rpython/rtyper/lltypesystem/test/test_llmemory.py @@ -579,25 +579,6 @@ assert weakref_deref(lltype.Ptr(S), w) == lltype.nullptr(S) assert weakref_deref(lltype.Ptr(S1), w) == lltype.nullptr(S1) -def test_generic_gcarray_of_ptr(): - S1 = lltype.GcStruct('S1', ('x', lltype.Signed)) - A1 = lltype.GcArray(lltype.Ptr(S1)) - A2 = lltype.GcArray(lltype.Ptr(A1)) - a2 = lltype.malloc(A2, 3) - a2[1] = lltype.malloc(A1, 4) - a2[1][2] = lltype.malloc(S1) - a2[1][2].x = -33 - - adr = cast_ptr_to_adr(a2) - assert (adr + gcarrayofptr_lengthoffset).signed[0] == 3 - adr += gcarrayofptr_itemsoffset - adr += gcarrayofptr_singleitemoffset - adr = adr.address[0] # => a2[1] - assert (adr + gcarrayofptr_lengthoffset).signed[0] == 4 - adr += gcarrayofptr_itemsoffset + 2 * gcarrayofptr_singleitemoffset - adr = adr.address[0] # => s2[1][2] - assert (adr + FieldOffset(S1, 'x')).signed[0] == -33 - def test_raw_memclear_on_empty_array(): py.test.skip("Fails") A = lltype.FixedSizeArray(lltype.Signed, 0) 
From noreply at buildbot.pypy.org Sun Nov 3 13:24:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 13:24:32 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Apply the diff of incminimark.py to minimark.py. Message-ID: <20131103122432.D9D841C01F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67812:d0b26d48adbf Date: 2013-11-03 13:24 +0100 http://bitbucket.org/pypy/pypy/changeset/d0b26d48adbf/ Log: Apply the diff of incminimark.py to minimark.py. diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1355,8 +1355,8 @@ # Get the number of card marker bytes in the header. typeid = self.get_type_id(obj) offset_to_length = self.varsize_offset_to_length(typeid) - length = (obj + offset_to_length).signed[0] - bytes = self.card_marking_bytes_for_length(length) + allocated_length = (obj + offset_to_length).signed[0] + bytes = self.card_marking_bytes_for_length(allocated_length) p = llarena.getfakearenaaddress(obj - size_gc_header) # # If the object doesn't have GCFLAG_TRACK_YOUNG_PTRS, then it @@ -1374,6 +1374,9 @@ else: # Walk the bytes encoding the card marker bits, and for # each bit set, call trace_and_drag_out_of_nursery_partial(). 
+ offset_to_used_length = self.varsize_offset_to_used_length( + typeid) + used_length = (obj + offset_to_used_length).signed[0] interval_start = 0 while bytes > 0: p -= 1 @@ -1386,10 +1389,10 @@ interval_stop = interval_start + self.card_page_indices # if cardbyte & 1: - if interval_stop > length: - interval_stop = length - ll_assert(cardbyte <= 1 and bytes == 0, - "premature end of object") + if interval_stop > used_length: + interval_stop = used_length + if interval_stop <= interval_start: + break self.trace_and_drag_out_of_nursery_partial( obj, interval_start, interval_stop) # @@ -1463,7 +1466,7 @@ # tid == -42, containing all flags), and it doesn't have the # HAS_SHADOW flag either. We must move it out of the nursery, # into a new nonmovable location. - totalsize = size_gc_header + self.get_size(obj) + totalsize = size_gc_header + self.shrink_and_get_size(obj) newhdr = self._malloc_out_of_nursery(totalsize) # elif self.is_forwarded(obj): From noreply at buildbot.pypy.org Sun Nov 3 19:26:34 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 3 Nov 2013 19:26:34 +0100 (CET) Subject: [pypy-commit] pypy default: Document this branch Message-ID: <20131103182634.600441C01DC@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67813:27c463966e39 Date: 2013-11-03 10:25 -0800 http://bitbucket.org/pypy/pypy/changeset/27c463966e39/ Log: Document this branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -130,3 +130,7 @@ only every 32 minor collection is enough). Should avoid the "memory leaks" observed in long-running processes, actually created by the jit compiling more and more rarely executed paths. + +.. branch: fix-trace-jit +Fixed the usage of sys.settrace() with the JIT. Also made it so using +sys.settrace() doesn't cause the GIL to be released on every single iteration. 
From noreply at buildbot.pypy.org Sun Nov 3 19:51:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 19:51:12 +0100 (CET) Subject: [pypy-commit] pypy default: Add a debug_flush() just before calling _exit(). Otherwise the Message-ID: <20131103185112.656E21C11BD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67814:fb3b41cc383a Date: 2013-11-03 19:50 +0100 http://bitbucket.org/pypy/pypy/changeset/fb3b41cc383a/ Log: Add a debug_flush() just before calling _exit(). Otherwise the log file is left incomplete. diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1641,9 +1641,11 @@ @registering(os._exit) def register_os__exit(self): + from rpython.rlib import debug os__exit = self.llexternal('_exit', [rffi.INT], lltype.Void) def _exit_llimpl(status): + debug.debug_flush() os__exit(rffi.cast(rffi.INT, status)) return extdef([int], s_None, llimpl=_exit_llimpl, diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -499,6 +499,21 @@ assert 'foo}' in lines[2] assert len(lines) == 3 + def test_debug_flush_at_exit(self): + def entry_point(argv): + debug_start("mycat") + os._exit(0) + return 0 + + t, cbuilder = self.compile(entry_point) + path = udir.join('test_debug_flush_at_exit.log') + cbuilder.cmdexec("", env={'PYPYLOG': ':%s' % path}) + # + f = open(str(path), 'r') + lines = f.readlines() + f.close() + assert lines[0].endswith('{mycat\n') + def test_fatal_error(self): def g(x): if x == 1: From noreply at buildbot.pypy.org Sun Nov 3 20:01:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 3 Nov 2013 20:01:48 +0100 (CET) Subject: [pypy-commit] pypy default: Add __pypy__.debug_flush() to request a debug_flush from app-level Message-ID: 
<20131103190148.95A2E1C1448@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67815:69bc27908b13 Date: 2013-11-03 20:01 +0100 http://bitbucket.org/pypy/pypy/changeset/69bc27908b13/ Log: Add __pypy__.debug_flush() to request a debug_flush from app-level explicitly. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -69,6 +69,7 @@ 'debug_print' : 'interp_debug.debug_print', 'debug_stop' : 'interp_debug.debug_stop', 'debug_print_once' : 'interp_debug.debug_print_once', + 'debug_flush' : 'interp_debug.debug_flush', 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', diff --git a/pypy/module/__pypy__/interp_debug.py b/pypy/module/__pypy__/interp_debug.py --- a/pypy/module/__pypy__/interp_debug.py +++ b/pypy/module/__pypy__/interp_debug.py @@ -23,3 +23,8 @@ debug_start(space, category) debug_print(space, args_w) debug_stop(space, category) + + + at jit.dont_look_inside +def debug_flush(space): + debug.debug_flush() diff --git a/pypy/module/__pypy__/test/test_debug.py b/pypy/module/__pypy__/test/test_debug.py --- a/pypy/module/__pypy__/test/test_debug.py +++ b/pypy/module/__pypy__/test/test_debug.py @@ -43,3 +43,8 @@ ('debug_print', 'hello world'), ]) ]) + + def test_debug_flush(self): + from __pypy__ import debug_flush + debug_flush() + # assert did not crash From noreply at buildbot.pypy.org Sun Nov 3 20:15:03 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 3 Nov 2013 20:15:03 +0100 (CET) Subject: [pypy-commit] pypy default: document this branch Message-ID: <20131103191503.2D85A1C00F8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r67816:d7f24445f817 Date: 2013-11-03 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d7f24445f817/ Log: document this branch diff --git a/pypy/doc/whatsnew-head.rst 
b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -134,3 +134,6 @@ .. branch: fix-trace-jit Fixed the usage of sys.settrace() with the JIT. Also made it so using sys.settrace() doesn't cause the GIL to be released on every single iteration. + +.. branch: rordereddict +Implement OrderedDict in RPython From noreply at buildbot.pypy.org Sun Nov 3 21:29:53 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 3 Nov 2013 21:29:53 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: This was done forever ago Message-ID: <20131103202953.271291C01CB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r5106:c2bd0a32f917 Date: 2013-11-03 12:29 -0800 http://bitbucket.org/pypy/extradoc/changeset/c2bd0a32f917/ Log: This was done forever ago diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -75,9 +75,6 @@ still remains, even though it's obviously not necessary since x and None will have different known_classes. -- optimize arraycopy also in the cases where one of the arrays is a virtual and - short. This is seen a lot in translate.py - - calling string equality does not automatically promote the argument to a constant. 
From noreply at buildbot.pypy.org Mon Nov 4 01:06:15 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 01:06:15 +0100 (CET) Subject: [pypy-commit] pypy default: adjust this test now that numpypy is external Message-ID: <20131104000615.EF4BF1C11BD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67817:cbfe08907bc1 Date: 2013-11-03 19:05 -0500 http://bitbucket.org/pypy/pypy/changeset/cbfe08907bc1/ Log: adjust this test now that numpypy is external diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -360,8 +360,8 @@ def test_ndarray_ref(self, space, api): w_obj = space.appexec([], """(): - import numpypy as np - return np.int64(2)""") + import _numpypy + return _numpypy.multiarray.dtype('int64').type(2)""") ref = make_ref(space, w_obj) api.Py_DecRef(ref) From noreply at buildbot.pypy.org Mon Nov 4 02:11:37 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 02:11:37 +0100 (CET) Subject: [pypy-commit] pypy default: add shape/ndim attributes to scalars Message-ID: <20131104011137.AC9E61C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67818:923d5e1fe253 Date: 2013-11-03 20:10 -0500 http://bitbucket.org/pypy/pypy/changeset/923d5e1fe253/ Log: add shape/ndim attributes to scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -265,6 +265,12 @@ def descr_get_itemsize(self, space): return self.get_dtype(space).descr_get_itemsize(space) + def descr_get_shape(self, space): + return space.newtuple([]) + + def descr_get_ndim(self, space): + return space.wrap(0) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") @@ -514,6 +520,8 @@ dtype = 
GetSetProperty(W_GenericBox.descr_get_dtype), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), + shape = GetSetProperty(W_GenericBox.descr_get_shape), + ndim = GetSetProperty(W_GenericBox.descr_get_ndim), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -61,3 +61,5 @@ value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.itemsize == 8 + assert value.shape == () + assert value.ndim == 0 From noreply at buildbot.pypy.org Mon Nov 4 02:47:28 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 02:47:28 +0100 (CET) Subject: [pypy-commit] pypy default: accept order argument for copy Message-ID: <20131104014728.C121D1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67819:61d2ad085488 Date: 2013-11-03 20:38 -0500 http://bitbucket.org/pypy/pypy/changeset/61d2ad085488/ Log: accept order argument for copy diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -63,6 +63,11 @@ NPY_FLOATINGLTR = 'f' NPY_COMPLEXLTR = 'c' +NPY_ANYORDER = -1 +NPY_CORDER = 0 +NPY_FORTRANORDER = 1 +NPY_KEEPORDER = 2 + NPY_CLIP = 0 NPY_WRAP = 1 NPY_RAISE = 2 diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -18,3 +18,25 @@ return mode raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) + +def order_converter(space, w_order, default): + if space.is_none(w_order): + return default + if not space.isinstance_w(w_order, space.w_str): + if space.is_true(w_order): + return NPY_FORTRANORDER + else: + return 
NPY_CORDER + else: + order = space.str_w(w_order) + if order.startswith('C') or order.startswith('c'): + return NPY_CORDER + elif order.startswith('F') or order.startswith('f'): + return NPY_FORTRANORDER + elif order.startswith('A') or order.startswith('a'): + return NPY_ANYORDER + elif order.startswith('K') or order.startswith('k'): + return NPY_KEEPORDER + else: + raise OperationError(space.w_TypeError, space.wrap( + "order not understood")) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -18,6 +18,8 @@ from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation +from pypy.module.micronumpy.conversion_utils import order_converter +from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): if space.is_none(w_size): @@ -287,7 +289,11 @@ def get_scalar_value(self): return self.implementation.get_scalar_value() - def descr_copy(self, space): + def descr_copy(self, space, w_order=None): + order = order_converter(space, w_order, NPY_KEEPORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) copy = self.implementation.copy(space) w_subtype = space.type(self) return wrap_impl(space, w_subtype, self, copy) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -411,6 +411,22 @@ b = a.copy() assert b[0] == a[0] + a = arange(8) + b = a.copy(order=None) + assert (b == a).all() + b = a.copy(order=0) + assert (b == a).all() + b = a.copy(order='C') + assert (b == a).all() + b = a.copy(order='K') + assert (b == a).all() + b = a.copy(order='A') + assert (b == a).all() + import sys + if 
'__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.copy, order='F') + raises(NotImplementedError, a.copy, order=True) + def test_iterator_init(self): from numpypy import array a = array(range(5)) From noreply at buildbot.pypy.org Mon Nov 4 02:47:30 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 02:47:30 +0100 (CET) Subject: [pypy-commit] pypy default: clean up order argument for reshape Message-ID: <20131104014730.143621C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67820:c649a2a354b0 Date: 2013-11-03 20:46 -0500 http://bitbucket.org/pypy/pypy/changeset/c649a2a354b0/ Log: clean up order argument for reshape diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -345,17 +345,20 @@ numpypy.reshape : equivalent function """ args_w, kw_w = __args__.unpack() - order = 'C' + order = NPY_CORDER if kw_w: if "order" in kw_w: - order = space.str_w(kw_w["order"]) + order = order_converter(space, kw_w["order"], order) del kw_w["order"] if kw_w: raise OperationError(space.w_TypeError, space.wrap( "reshape() got unexpected keyword argument(s)")) - if order != 'C': + if order == NPY_KEEPORDER: + raise OperationError(space.w_ValueError, space.wrap( + "order 'K' is not permitted for reshaping")) + if order != NPY_CORDER and order != NPY_ANYORDER: raise OperationError(space.w_NotImplementedError, space.wrap( - "order not implemented")) + "unsupported value for order")) if len(args_w) == 1: w_shape = args_w[0] else: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -715,7 +715,9 @@ a = array([[[[]]]]) assert a.reshape((0,)).shape == (0,) assert a.reshape((0,), order='C').shape == (0,) + assert a.reshape((0,), 
order='A').shape == (0,) raises(TypeError, a.reshape, (0,), badarg="C") + raises(ValueError, a.reshape, (0,), order="K") import sys if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, a.reshape, (0,), order='F') From noreply at buildbot.pypy.org Mon Nov 4 07:53:28 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 07:53:28 +0100 (CET) Subject: [pypy-commit] pypy default: fix complex cast on complex scalar Message-ID: <20131104065328.ABB711C01DC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67821:07b43a29d735 Date: 2013-11-04 01:07 -0500 http://bitbucket.org/pypy/pypy/changeset/07b43a29d735/ Log: fix complex cast on complex scalar diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -339,6 +339,10 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") class W_ComplexFloatingBox(W_InexactBox): + def descr_complex(self, space): + assert isinstance(self, ComplexBox) + return space.wrap(complex(self.real, self.imag)) + def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) box = self.convert_real_to(dtype) @@ -644,6 +648,7 @@ __module__ = "numpypy", __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), + __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) @@ -652,6 +657,7 @@ __module__ = "numpypy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), + __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) @@ -667,6 +673,7 @@ __module__ = "numpypy", __new__ = 
interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), + __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -5,7 +5,9 @@ ufunc = type(sin) types = ['bool8', 'byte', 'ubyte', 'short', 'ushort', 'longlong', 'ulonglong', - 'single', 'longfloat', 'longdouble', 'csingle', 'cfloat', 'void'] + 'single', 'double', 'longfloat', 'longdouble', + 'csingle', 'cdouble', 'cfloat', 'clongdouble', + 'void'] for t in ('int', 'uint'): for s in (8, 16, 32, 64, 'p'): types.append(t + str(s)) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -63,3 +63,9 @@ assert value.itemsize == 8 assert value.shape == () assert value.ndim == 0 + + def test_complex_scalar_complex_cast(self): + import numpy as np + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert complex(x) == 1+2j From noreply at buildbot.pypy.org Mon Nov 4 07:53:29 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 07:53:29 +0100 (CET) Subject: [pypy-commit] pypy default: update __module__ for boxes Message-ID: <20131104065329.D74A81C01F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67822:02757994cd72 Date: 2013-11-04 01:52 -0500 http://bitbucket.org/pypy/pypy/changeset/02757994cd72/ Log: update __module__ for boxes diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -458,7 +458,7 @@ return 
W_UnicodeBox(arr, 0, arr.dtype) W_GenericBox.typedef = TypeDef("generic", - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_GenericBox.descr__new__.im_func), @@ -529,72 +529,72 @@ ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), __index__ = interp2app(W_BoolBox.descr_index), __reduce__ = interp2app(W_BoolBox.descr_reduce), ) W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), __index__ = interp2app(W_Int8Box.descr_index), __reduce__ = interp2app(W_Int8Box.descr_reduce), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_UInt8Box.descr__new__.im_func), __index__ = interp2app(W_UInt8Box.descr_index), __reduce__ = interp2app(W_UInt8Box.descr_reduce), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Int16Box.descr__new__.im_func), __index__ = interp2app(W_Int16Box.descr_index), __reduce__ = interp2app(W_Int16Box.descr_reduce), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_UInt16Box.descr__new__.im_func), __index__ = interp2app(W_UInt16Box.descr_index), 
__reduce__ = interp2app(W_UInt16Box.descr_reduce), ) W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Int32Box.descr__new__.im_func), __index__ = interp2app(W_Int32Box.descr_index), __reduce__ = interp2app(W_Int32Box.descr_reduce), ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_UInt32Box.descr__new__.im_func), __index__ = interp2app(W_UInt32Box.descr_index), __reduce__ = interp2app(W_UInt32Box.descr_reduce), ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Int64Box.descr__new__.im_func), __index__ = interp2app(W_Int64Box.descr_index), __reduce__ = interp2app(W_Int64Box.descr_reduce), @@ -608,44 +608,44 @@ W_ULongBox = W_UInt64Box W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), __index__ = interp2app(W_UInt64Box.descr_index), __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_Float16Box.typedef = TypeDef("float16", W_FloatingBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Float16Box.descr__new__.im_func), __reduce__ = interp2app(W_Float16Box.descr_reduce), ) W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Float32Box.descr__new__.im_func), __reduce__ = interp2app(W_Float32Box.descr_reduce), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), - 
__module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_Complex64Box.typedef = TypeDef("complex64", (W_ComplexFloatingBox.typedef), - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), @@ -654,7 +654,7 @@ ) W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), @@ -664,13 +664,13 @@ if long_double_size in (8, 12, 16): W_FloatLongBox.typedef = TypeDef("float%d" % (long_double_size * 8), (W_FloatingBox.typedef), - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_FloatLongBox.descr__new__.im_func), __reduce__ = interp2app(W_FloatLongBox.descr_reduce), ) W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), @@ -679,26 +679,26 @@ ) W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = 
interp2app(W_VoidBox.descr_setitem), ) W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, - __module__ = "numpypy", + __module__ = "numpy", ) W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) From noreply at buildbot.pypy.org Mon Nov 4 07:53:30 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 07:53:30 +0100 (CET) Subject: [pypy-commit] pypy default: this should be looking in numpy now Message-ID: <20131104065330.F2E521C01DC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67823:8625b69babe3 Date: 2013-11-04 01:52 -0500 http://bitbucket.org/pypy/pypy/changeset/8625b69babe3/ Log: this should be looking in numpy now diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -97,7 +97,7 @@ def main(argv): cpy_items = find_numpy_items("/usr/bin/python") - pypy_items = find_numpy_items(argv[1], "numpypy") + pypy_items = find_numpy_items(argv[1]) ver = get_version_str(argv[1]) all_items = [] From noreply at buildbot.pypy.org Mon Nov 4 07:59:35 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 4 Nov 2013 07:59:35 +0100 (CET) Subject: [pypy-commit] pypy default: Make test_ztranslation fail on space.wrap(complex(...)) Message-ID: <20131104065935.D328F1C01DC@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67824:66bf553d6439 Date: 2013-11-03 22:59 -0800 http://bitbucket.org/pypy/pypy/changeset/66bf553d6439/ Log: Make test_ztranslation fail on space.wrap(complex(...)) diff --git 
a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -176,7 +176,7 @@ self._see_interp2app(x) if isinstance(x, GetSetProperty): self._see_getsetproperty(x) - if isinstance(x, r_singlefloat): + if isinstance(x, (r_singlefloat, complex)): self._wrap_not_rpython(x) if isinstance(x, list): if x == []: # special case: it is used e.g. in sys/__init__.py From noreply at buildbot.pypy.org Mon Nov 4 08:01:15 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 4 Nov 2013 08:01:15 +0100 (CET) Subject: [pypy-commit] pypy default: Revert, doesn't actually work Message-ID: <20131104070115.45E5E1C01DC@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67825:0d4f1f63b8a0 Date: 2013-11-03 23:00 -0800 http://bitbucket.org/pypy/pypy/changeset/0d4f1f63b8a0/ Log: Revert, doesn't actually work diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -176,7 +176,7 @@ self._see_interp2app(x) if isinstance(x, GetSetProperty): self._see_getsetproperty(x) - if isinstance(x, (r_singlefloat, complex)): + if isinstance(x, r_singlefloat): self._wrap_not_rpython(x) if isinstance(x, list): if x == []: # special case: it is used e.g. 
in sys/__init__.py From noreply at buildbot.pypy.org Mon Nov 4 08:32:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 4 Nov 2013 08:32:35 +0100 (CET) Subject: [pypy-commit] pypy default: The 'jitcounter' is another new object that is global, and that must be reset between tests in case they are Message-ID: <20131104073235.6F1341C0651@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67826:0ffd6995f846 Date: 2013-11-04 08:31 +0100 http://bitbucket.org/pypy/pypy/changeset/0ffd6995f846/ Log: The 'jitcounter' is another new object that is global, and that must be reset between tests in case they are using the same piece of compiled code. diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -6,7 +6,7 @@ import py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import reset_stats, get_stats +from rpython.jit.metainterp.warmspot import reset_jit, get_stats from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -67,8 +67,7 @@ def run(self, name): self.compile_graph() - reset_stats() - pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + reset_jit() i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) py.test.skip("don't run for now") @@ -147,8 +146,7 @@ def test_reduce_compile_only_once(self): self.compile_graph() - reset_stats() - pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + reset_jit() i = self.code_mapping['reduce'] # run it twice retval = self.interp.eval_graph(self.graph, [i]) @@ -158,8 +156,7 @@ def test_reduce_axis_compile_only_once(self): self.compile_graph() - reset_stats() - pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() 
+ reset_jit() i = self.code_mapping['axissum'] # run it twice retval = self.interp.eval_graph(self.graph, [i]) diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -151,3 +151,7 @@ def decay_all_counters(self): "NOT_RPYTHON" pass + + def _clear_all(self): + self.timetable.clear() + self.celltable.clear() diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -3,6 +3,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.llgraph import runner from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats +from rpython.jit.metainterp.warmspot import reset_stats from rpython.jit.metainterp.warmstate import unspecialize_value from rpython.jit.metainterp.optimizeopt import ALL_OPTS_DICT from rpython.jit.metainterp import pyjitpl, history, jitexc diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -164,6 +164,12 @@ def reset_stats(): pyjitpl._warmrunnerdesc.stats.clear() +def reset_jit(): + """Helper for some tests (see micronumpy/test/test_zjit.py)""" + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + pyjitpl._warmrunnerdesc.jitcounter._clear_all() + def get_translator(): return pyjitpl._warmrunnerdesc.translator From noreply at buildbot.pypy.org Mon Nov 4 09:04:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 09:04:16 +0100 (CET) Subject: [pypy-commit] pypy default: np.complex128 doesn't need a __complex__ method, simplify Message-ID: <20131104080416.D46E01D2370@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67827:53eec95199ea Date: 2013-11-04 02:55 -0500 
http://bitbucket.org/pypy/pypy/changeset/53eec95199ea/ Log: np.complex128 doesn't need a __complex__ method, simplify diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -339,10 +339,6 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") class W_ComplexFloatingBox(W_InexactBox): - def descr_complex(self, space): - assert isinstance(self, ComplexBox) - return space.wrap(complex(self.real, self.imag)) - def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) box = self.convert_real_to(dtype) @@ -648,7 +644,7 @@ __module__ = "numpy", __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), - __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), + __complex__ = interp2app(W_GenericBox.item), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) @@ -657,7 +653,6 @@ __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), - __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) @@ -673,7 +668,7 @@ __module__ = "numpy", __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), - __complex__ = interp2app(W_ComplexFloatingBox.descr_complex), + __complex__ = interp2app(W_GenericBox.item), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ 
-68,4 +68,5 @@ import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: x = tp(1+2j) + assert hasattr(x, '__complex__') == (tp != np.cdouble) assert complex(x) == 1+2j From noreply at buildbot.pypy.org Mon Nov 4 12:54:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 4 Nov 2013 12:54:06 +0100 (CET) Subject: [pypy-commit] pypy default: Add a test that dictionaries are type-erased correctly. This fails Message-ID: <20131104115406.D35D71C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67828:b5cd6e18901d Date: 2013-11-04 12:52 +0100 http://bitbucket.org/pypy/pypy/changeset/b5cd6e18901d/ Log: Add a test that dictionaries are type-erased correctly. This fails with rordereddict because of the LookupFamily. Managed to fix it by killing LookupFamily and replacing it with regular specialization. diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -4,6 +4,7 @@ from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib import objectmodel, jit, rgc +from rpython.rlib.objectmodel import specialize from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rtyper import rmodel @@ -44,20 +45,19 @@ DICT = lltype.typeOf(d).TO fun = d.lookup_function_no if fun == FUNC_BYTE: - return DICT.lookup_family.byte_lookup_function(d, key, hash, flag) + return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: - return DICT.lookup_family.short_lookup_function(d, key, hash, flag) + return ll_dict_lookup(d, key, hash, flag, TYPE_SHORT) elif IS_64BIT and fun == FUNC_INT: - return DICT.lookup_family.int_lookup_function(d, key, hash, flag) + return ll_dict_lookup(d, key, hash, flag, TYPE_INT) elif fun == FUNC_LONG: - return 
DICT.lookup_family.long_lookup_function(d, key, hash, flag) + return ll_dict_lookup(d, key, hash, flag, TYPE_LONG) assert False def get_ll_dict(DICTKEY, DICTVALUE, get_custom_eq_hash=None, DICT=None, ll_fasthash_function=None, ll_hash_function=None, ll_eq_function=None, method_cache={}, - dummykeyobj=None, dummyvalueobj=None, rtyper=None, - setup_lookup_funcs=True): + dummykeyobj=None, dummyvalueobj=None, rtyper=None): # get the actual DICT type. if DICT is None, it's created, otherwise # forward reference is becoming DICT if DICT is None: @@ -147,52 +147,10 @@ adtmeths['lookup_function'] = lltype.staticAdtMethod(ll_call_lookup_function) adtmeths['allocate'] = lltype.typeMethod(_ll_malloc_dict) - family = LookupFamily() - adtmeths['lookup_family'] = family - DICT.become(lltype.GcStruct("dicttable", adtmeths=adtmeths, *fields)) - - family.empty_array = DICTENTRYARRAY.allocate(0) - if setup_lookup_funcs: - _setup_lookup_funcs(DICT, rtyper, family) return DICT -def _setup_lookup_funcs(DICT, rtyper, family): - DICTKEY = DICT.entries.TO.OF.key - LOOKUP_FUNC = lltype.Ptr(lltype.FuncType([lltype.Ptr(DICT), DICTKEY, - lltype.Signed, lltype.Signed], - lltype.Signed)) - - - STORECLEAN_FUNC = lltype.Ptr(lltype.FuncType([lltype.Ptr(DICT), - lltype.Signed, - lltype.Signed], - lltype.Void)) - - for name, T in [('byte', rffi.UCHAR), - ('short', rffi.USHORT), - ('int', rffi.UINT), - ('long', lltype.Unsigned)]: - if name == 'int' and not IS_64BIT: - continue - lookupfn, storecleanfn = new_lookup_functions(LOOKUP_FUNC, - STORECLEAN_FUNC, T=T, - rtyper=rtyper) - setattr(family, '%s_lookup_function' % name, lookupfn) - setattr(family, '%s_insert_clean_function' % name, storecleanfn) - -def llhelper_or_compile(rtyper, FUNCPTR, ll_func): - # the check is for pseudo rtyper from tests - if rtyper is None or not hasattr(rtyper, 'annotate_helper_fn'): - return llhelper(FUNCPTR, ll_func) - else: - return rtyper.annotate_helper_fn(ll_func, FUNCPTR.TO.ARGS) - -class LookupFamily: - def 
_freeze_(self): - return True - class OrderedDictRepr(AbstractDictRepr): @@ -249,17 +207,9 @@ s_key) kwd['dummyvalueobj'] = self.value_repr.get_ll_dummyval_obj( self.rtyper, s_value) - - kwd['setup_lookup_funcs'] = False get_ll_dict(DICTKEY, DICTVALUE, DICT=self.DICT, rtyper=self.rtyper, **kwd) - def _setup_repr_final(self): - if not self.finalized: - family = self.lowleveltype.TO.lookup_family - _setup_lookup_funcs(self.lowleveltype.TO, self.rtyper, family) - self.finalized = True - def convert_const(self, dictobj): from rpython.rtyper.lltypesystem import llmemory @@ -451,6 +401,10 @@ FUNC_BYTE, FUNC_SHORT, FUNC_INT, FUNC_LONG = range(4) else: FUNC_BYTE, FUNC_SHORT, FUNC_LONG = range(3) +TYPE_BYTE = rffi.UCHAR +TYPE_SHORT = rffi.USHORT +TYPE_INT = rffi.UINT +TYPE_LONG = lltype.Unsigned def ll_malloc_indexes_and_choose_lookup(d, n): if n <= 256: @@ -477,13 +431,13 @@ def ll_call_insert_clean_function(d, hash, i): DICT = lltype.typeOf(d).TO if d.lookup_function_no == FUNC_BYTE: - DICT.lookup_family.byte_insert_clean_function(d, hash, i) + ll_dict_store_clean(d, hash, i, TYPE_BYTE) elif d.lookup_function_no == FUNC_SHORT: - DICT.lookup_family.short_insert_clean_function(d, hash, i) + ll_dict_store_clean(d, hash, i, TYPE_SHORT) elif IS_64BIT and d.lookup_function_no == FUNC_INT: - DICT.lookup_family.int_insert_clean_function(d, hash, i) + ll_dict_store_clean(d, hash, i, TYPE_INT) elif d.lookup_function_no == FUNC_LONG: - DICT.lookup_family.long_insert_clean_function(d, hash, i) + ll_dict_store_clean(d, hash, i, TYPE_LONG) else: assert False @@ -738,31 +692,83 @@ FLAG_DELETE = 2 FLAG_DELETE_TRY_HARD = 3 -def new_lookup_functions(LOOKUP_FUNC, STORECLEAN_FUNC, T, rtyper=None): - INDEXES = lltype.Ptr(lltype.GcArray(T)) + at specialize.memo() +def _ll_ptr_to_array_of(T): + return lltype.Ptr(lltype.GcArray(T)) - def ll_kill_something(d): - i = 0 - indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) - while True: - index = rffi.cast(lltype.Signed, indexes[i]) - if index >= 
VALID_OFFSET: +def ll_kill_something(d, T): + INDEXES = _ll_ptr_to_array_of(T) + i = 0 + indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) + while True: + index = rffi.cast(lltype.Signed, indexes[i]) + if index >= VALID_OFFSET: + indexes[i] = rffi.cast(T, DELETED) + return index + i += 1 + + at jit.look_inside_iff(lambda d, key, hash, store_flag, T: + jit.isvirtual(d) and jit.isconstant(key)) +def ll_dict_lookup(d, key, hash, store_flag, T): + INDEXES = _ll_ptr_to_array_of(T) + entries = d.entries + indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) + mask = len(indexes) - 1 + i = r_uint(hash & mask) + # do the first try before any looping + ENTRIES = lltype.typeOf(entries).TO + direct_compare = not hasattr(ENTRIES, 'no_direct_compare') + index = rffi.cast(lltype.Signed, indexes[intmask(i)]) + if index >= VALID_OFFSET: + checkingkey = entries[index - VALID_OFFSET].key + if direct_compare and checkingkey == key: + if store_flag == FLAG_DELETE: indexes[i] = rffi.cast(T, DELETED) - return index - i += 1 + return index - VALID_OFFSET # found the entry + if d.keyeq is not None and entries.hash(index - VALID_OFFSET) == hash: + # correct hash, maybe the key is e.g. 
a different pointer to + # an equal object + found = d.keyeq(checkingkey, key) + #llop.debug_print(lltype.Void, "comparing keys", ll_debugrepr(checkingkey), ll_debugrepr(key), found) + if d.paranoia: + if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or + not entries.valid(index - VALID_OFFSET) or + entries[index - VALID_OFFSET].key != checkingkey): + # the compare did major nasty stuff to the dict: start over + return ll_dict_lookup(d, key, hash, store_flag, T) + if found: + if store_flag == FLAG_DELETE: + indexes[i] = rffi.cast(T, DELETED) + return index - VALID_OFFSET + deletedslot = -1 + elif index == DELETED: + deletedslot = intmask(i) + else: + # pristine entry -- lookup failed + if store_flag == FLAG_STORE: + indexes[i] = rffi.cast(T, d.num_used_items + VALID_OFFSET) + elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: + return ll_kill_something(d, T) + return -1 - @jit.look_inside_iff(lambda d, key, hash, store_flag: - jit.isvirtual(d) and jit.isconstant(key)) - def ll_dict_lookup(d, key, hash, store_flag): - entries = d.entries - indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) - mask = len(indexes) - 1 - i = r_uint(hash & mask) - # do the first try before any looping - ENTRIES = lltype.typeOf(entries).TO - direct_compare = not hasattr(ENTRIES, 'no_direct_compare') + # In the loop, a deleted entry (everused and not valid) is by far + # (factor of 100s) the least likely outcome, so test for that last. 
+ perturb = r_uint(hash) + while 1: + # compute the next index using unsigned arithmetic + i = (i << 2) + i + perturb + 1 + i = i & mask index = rffi.cast(lltype.Signed, indexes[intmask(i)]) - if index >= VALID_OFFSET: + if index == FREE: + if store_flag == FLAG_STORE: + if deletedslot == -1: + deletedslot = intmask(i) + indexes[deletedslot] = rffi.cast(T, d.num_used_items + + VALID_OFFSET) + elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: + return ll_kill_something(d, T) + return -1 + elif index >= VALID_OFFSET: checkingkey = entries[index - VALID_OFFSET].key if direct_compare and checkingkey == key: if store_flag == FLAG_DELETE: @@ -772,85 +778,34 @@ # correct hash, maybe the key is e.g. a different pointer to # an equal object found = d.keyeq(checkingkey, key) - #llop.debug_print(lltype.Void, "comparing keys", ll_debugrepr(checkingkey), ll_debugrepr(key), found) if d.paranoia: if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or not entries.valid(index - VALID_OFFSET) or entries[index - VALID_OFFSET].key != checkingkey): # the compare did major nasty stuff to the dict: start over - return ll_dict_lookup(d, key, hash, store_flag) + return ll_dict_lookup(d, key, hash, store_flag, T) if found: if store_flag == FLAG_DELETE: indexes[i] = rffi.cast(T, DELETED) return index - VALID_OFFSET - deletedslot = -1 - elif index == DELETED: + elif deletedslot == -1: deletedslot = intmask(i) - else: - # pristine entry -- lookup failed - if store_flag == FLAG_STORE: - indexes[i] = rffi.cast(T, d.num_used_items + VALID_OFFSET) - elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: - return ll_kill_something(d) - return -1 + perturb >>= PERTURB_SHIFT - # In the loop, a deleted entry (everused and not valid) is by far - # (factor of 100s) the least likely outcome, so test for that last. 
- perturb = r_uint(hash) - while 1: - # compute the next index using unsigned arithmetic - i = (i << 2) + i + perturb + 1 - i = i & mask - index = rffi.cast(lltype.Signed, indexes[intmask(i)]) - if index == FREE: - if store_flag == FLAG_STORE: - if deletedslot == -1: - deletedslot = intmask(i) - indexes[deletedslot] = rffi.cast(T, d.num_used_items + - VALID_OFFSET) - elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: - return ll_kill_something(d) - return -1 - elif index >= VALID_OFFSET: - checkingkey = entries[index - VALID_OFFSET].key - if direct_compare and checkingkey == key: - if store_flag == FLAG_DELETE: - indexes[i] = rffi.cast(T, DELETED) - return index - VALID_OFFSET # found the entry - if d.keyeq is not None and entries.hash(index - VALID_OFFSET) == hash: - # correct hash, maybe the key is e.g. a different pointer to - # an equal object - found = d.keyeq(checkingkey, key) - if d.paranoia: - if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or - not entries.valid(index - VALID_OFFSET) or - entries[index - VALID_OFFSET].key != checkingkey): - # the compare did major nasty stuff to the dict: start over - return ll_dict_lookup(d, key, hash, store_flag) - if found: - if store_flag == FLAG_DELETE: - indexes[i] = rffi.cast(T, DELETED) - return index - VALID_OFFSET - elif deletedslot == -1: - deletedslot = intmask(i) - perturb >>= PERTURB_SHIFT - - def ll_dict_store_clean(d, hash, index): - # a simplified version of ll_dict_lookup() which assumes that the - # key is new, and the dictionary doesn't contain deleted entries. - # It only finds the next free slot for the given hash. 
- indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) - mask = len(indexes) - 1 - i = r_uint(hash & mask) - perturb = r_uint(hash) - while rffi.cast(lltype.Signed, indexes[i]) != 0: - i = (i << 2) + i + perturb + 1 - i = i & mask - perturb >>= PERTURB_SHIFT - indexes[i] = rffi.cast(T, index + VALID_OFFSET) - - return (llhelper_or_compile(rtyper, LOOKUP_FUNC, ll_dict_lookup), - llhelper_or_compile(rtyper, STORECLEAN_FUNC, ll_dict_store_clean)) +def ll_dict_store_clean(d, hash, index, T): + # a simplified version of ll_dict_lookup() which assumes that the + # key is new, and the dictionary doesn't contain deleted entries. + # It only finds the next free slot for the given hash. + INDEXES = _ll_ptr_to_array_of(T) + indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) + mask = len(indexes) - 1 + i = r_uint(hash & mask) + perturb = r_uint(hash) + while rffi.cast(lltype.Signed, indexes[i]) != 0: + i = (i << 2) + i + perturb + 1 + i = i & mask + perturb >>= PERTURB_SHIFT + indexes[i] = rffi.cast(T, index + VALID_OFFSET) # ____________________________________________________________ # @@ -858,9 +813,15 @@ DICT_INITSIZE = 8 + + at specialize.memo() +def _ll_empty_array(DICT): + """Memo function: cache a single prebuilt allocated empty array.""" + return DICT.entries.TO.allocate(0) + def ll_newdict(DICT): d = DICT.allocate() - d.entries = DICT.lookup_family.empty_array + d.entries = _ll_empty_array(DICT) ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE) d.num_items = 0 d.num_used_items = 0 @@ -1030,7 +991,7 @@ return DICT = lltype.typeOf(d).TO old_entries = d.entries - d.entries = DICT.lookup_family.empty_array + d.entries = _ll_empty_array(DICT) ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE) d.num_items = 0 d.num_used_items = 0 diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -856,12 +856,28 @@ res = self.interpret(func, [42]) assert res == 42 + 
def test_externalvsinternal(self): + class A: pass + class B: pass + class C: pass + class D: pass + def func(): + d1 = self.newdict(); d1[A()] = B() + d2 = self.newdict2(); d2[C()] = D() + return (d1, d2) + res = self.interpret(func, []) + assert lltype.typeOf(res.item0) == lltype.typeOf(res.item1) + class TestRDict(BaseTestRDict): @staticmethod def newdict(): return {} + @staticmethod + def newdict2(): + return {} + def test_two_dicts_with_different_value_types(self): def func(i): d1 = {} diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -261,6 +261,10 @@ def newdict(): return OrderedDict() + @staticmethod + def newdict2(): + return OrderedDict() + def test_two_dicts_with_different_value_types(self): def func(i): d1 = OrderedDict() From noreply at buildbot.pypy.org Mon Nov 4 13:19:35 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 4 Nov 2013 13:19:35 +0100 (CET) Subject: [pypy-commit] stmgc default: implement stm_stop_all_other_threads() and Message-ID: <20131104121935.8120E1C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r543:79aa5685d286 Date: 2013-11-04 13:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/79aa5685d286/ Log: implement stm_stop_all_other_threads() and stm_partial_commit_and_resume_other_threads() diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "stmgc.h" #include "stmimpl.h" @@ -442,7 +443,7 @@ gcptr rare_events(gcptr p, gcptr _r, gcptr _sr) { check_public_ints(); - int k = get_rand(100); + int k = get_rand(200); if (k < 10) { push_roots(); stm_push_root(p); @@ -464,7 +465,24 @@ pop_public_int(); p = NULL; } - else if (k < 61 && DO_MAJOR_COLLECTS) { + else if (k < 61) { + push_roots(); + stm_push_root(p); + + stm_stop_all_other_threads(); + + p = stm_pop_root(); 
+ p = write_barrier(p); + stm_push_root(p); + + sleep(0); + + stm_partial_commit_and_resume_other_threads(); + + p = stm_pop_root(); + pop_roots(); + } + else if (k < 62 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); push_roots(); stmgcpage_possibly_major_collect(1); diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1071,11 +1071,12 @@ return d->atomic; } -static void init_transaction(struct tx_descriptor *d) +static void init_transaction(struct tx_descriptor *d, int already_locked) { assert(d->atomic == 0); assert(*d->active_ref == 0); - stm_start_sharedlock(); + if (!already_locked) + stm_start_sharedlock(); assert(*d->active_ref == 0); if (clock_gettime(CLOCK_MONOTONIC, &d->start_real_time) < 0) { @@ -1096,7 +1097,7 @@ void stm_begin_transaction(void *buf, void (*longjmp_callback)(void *)) { struct tx_descriptor *d = thread_descriptor; - init_transaction(d); + init_transaction(d, 0); *d->active_ref = 1; d->setjmp_buf = buf; d->longjmp_callback = longjmp_callback; @@ -1425,13 +1426,14 @@ dprintf(("private_from_protected: clear (abort)\n")); } -void CommitTransaction(void) +void CommitTransaction(int stay_inevitable) { /* must save roots around this call */ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(*d->active_ref >= 1); assert(d->atomic == 0); - dprintf(("CommitTransaction(%p)\n", d)); + dprintf(("CommitTransaction(%d): %p\n", stay_inevitable, d)); + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1445,7 +1447,11 @@ { stm_fatalerror("global_cur_time modified even though we are inev\n"); } - inev_mutex_release(); + + if (!stay_inevitable) { + /* we simply don't release the mutex. 
*/ + inev_mutex_release(); + } } else { @@ -1503,7 +1509,8 @@ spinlock_release(d->public_descriptor->collection_lock); d->num_commits++; *d->active_ref = 0; - stm_stop_sharedlock(); + if (!stay_inevitable) + stm_stop_sharedlock(); /* clear the list of callbacks that would have been called on abort */ @@ -1568,13 +1575,25 @@ make_inevitable(d); /* cannot abort any more */ } -void BeginInevitableTransaction(void) +void BeginInevitableTransaction(int already_inevitable) { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; revision_t cur_time; - init_transaction(d); - cur_time = acquire_inev_mutex_and_mark_global_cur_time(d); + init_transaction(d, already_inevitable); + + if (already_inevitable) { + cur_time = ACCESS_ONCE(global_cur_time); + assert((cur_time & 1) == 0); + if (!bool_cas(&global_cur_time, cur_time, cur_time + 1)) { + stm_fatalerror("there was a commit between a partial inevitable " + "commit and the continuation of the transaction\n"); + } + } + else { + cur_time = acquire_inev_mutex_and_mark_global_cur_time(d); + } + d->start_time = cur_time; make_inevitable(d); } diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -123,7 +123,8 @@ #define ABRT_VALIDATE_INEV 5 #define ABRT_COLLECT_MINOR 6 #define ABRT_COLLECT_MAJOR 7 -#define ABORT_REASONS 8 +#define ABRT_OTHER_THREADS 8 +#define ABORT_REASONS 9 #define ABORT_NAMES { "MANUAL", \ "COMMIT", \ "STOLEN_MODIFIED", \ @@ -132,6 +133,7 @@ "VALIDATE_INEV", \ "COLLECT_MINOR", \ "COLLECT_MAJOR", \ + "OTHER_THREADS", \ } #define SPLP_ABORT 0 @@ -207,8 +209,8 @@ /************************************************************/ -void BeginInevitableTransaction(void); /* must save roots around this call */ -void CommitTransaction(void); /* must save roots around this call */ +void BeginInevitableTransaction(int); /* must save roots around this call */ +void CommitTransaction(int); /* must save roots around this call */ void BecomeInevitable(const char *why); /* must save roots 
around this call */ void AbortTransaction(int); void AbortTransactionAfterCollect(struct tx_descriptor *, int); diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -1029,8 +1029,14 @@ if (ACCESS_ONCE(countdown_next_major_coll) > 0) return; - stm_start_single_thread(); - + /* in case we run in single_thread mode already and we are the + single thread, we must not try to enter it again. + This can happen after manually entering the mode by calling + stm_stop_all_other_threads(). */ + int single_threaded = in_single_thread == thread_descriptor; + if (!single_threaded) + stm_start_single_thread(); + /* If several threads were blocked on the previous line, the first one to proceed sees 0 in 'countdown_next_major_coll'. It's the thread that will do the major collection. Afterwards the other @@ -1039,7 +1045,8 @@ if (countdown_next_major_coll == 0) major_collect(); - stm_stop_single_thread(); + if (!single_threaded) + stm_stop_single_thread(); AbortNowIfDelayed(); } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -195,6 +195,14 @@ /* only user currently is stm_allocate_public_integer_address() */ void stm_register_integer_address(intptr_t); +/* enter single-threaded mode. Used e.g. when patching assembler + code that mustn't be executed in another thread while being + patched. This can be used to atomically update non-transactional + memory. + These calls may collect! 
*/ +void stm_stop_all_other_threads(void); +void stm_partial_commit_and_resume_other_threads(void); + /* macro functionality */ extern __thread gcptr *stm_shadowstack; diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -95,7 +95,7 @@ init_shadowstack(); stmgcpage_release_global_lock(); } - BeginInevitableTransaction(); + BeginInevitableTransaction(0); return token; } @@ -105,7 +105,7 @@ if (token == 1) stmgc_minor_collect(); /* force everything out of the nursery */ - CommitTransaction(); + CommitTransaction(0); if (token == 1) { stmgcpage_acquire_global_lock(); @@ -140,7 +140,7 @@ stm_push_root(END_MARKER_OFF); if (!thread_descriptor->atomic) - CommitTransaction(); + CommitTransaction(0); #ifdef _GC_ON_CPYTHON volatile PyThreadState *v_ts = PyGILState_GetThisThreadState(); @@ -192,7 +192,7 @@ assert(stm_shadowstack == v_saved_value + 2); if (!d->atomic) - CommitTransaction(); + CommitTransaction(0); counter = 0; } @@ -204,7 +204,7 @@ } } else { - BeginInevitableTransaction(); + BeginInevitableTransaction(0); } gcptr x = stm_pop_root(); /* pop the END_MARKER */ @@ -221,7 +221,7 @@ stm_possible_safe_point(); } else { - CommitTransaction(); + CommitTransaction(0); unsigned long limit = d->reads_size_limit_nonatomic; if (limit != 0 && limit < (stm_regular_length_limit >> 1)) @@ -246,7 +246,7 @@ { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; if (!d->atomic) - CommitTransaction(); + CommitTransaction(0); else BecomeInevitable("stm_commit_transaction but atomic"); } @@ -255,7 +255,7 @@ { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; if (!d->atomic) - BeginInevitableTransaction(); + BeginInevitableTransaction(0); } void stm_become_inevitable(const char *reason) @@ -278,7 +278,7 @@ static pthread_rwlock_t rwlock_shared = PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP; -static struct tx_descriptor *in_single_thread = NULL; /* for debugging */ +struct tx_descriptor 
*in_single_thread = NULL; void stm_start_sharedlock(void) { @@ -318,8 +318,45 @@ "pthread_rwlock_unlock failure\n"); } + +void stm_stop_all_other_threads(void) +{ /* push gc roots! */ + struct tx_descriptor *d; + + BecomeInevitable("stop_all_other_threads"); + stm_start_single_thread(); + + for (d = stm_tx_head; d; d = d->tx_next) { + if (*d->active_ref == 1) // && d != thread_descriptor) <- TRUE + AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS); + } +} + + +void stm_partial_commit_and_resume_other_threads(void) +{ /* push gc roots! */ + struct tx_descriptor *d = thread_descriptor; + assert(*d->active_ref == 2); + int atomic = d->atomic; + + /* Give up atomicity during commit. This still works because + we keep the inevitable status, thereby being guaranteed to + commit before all others. */ + stm_atomic(-atomic); + + /* Commit and start new inevitable transaction while never + giving up the inevitable status. */ + CommitTransaction(1); /* 1=stay_inevitable! */ + BeginInevitableTransaction(1); + + /* restore atomic-count */ + stm_atomic(atomic); + + stm_stop_single_thread(); +} + void stm_start_single_thread(void) -{ +{ /* push gc roots! */ /* Called by the GC, just after a minor collection, when we need to do a major collection. When it returns, it acquired the "write lock" which prevents any other thread from running in a transaction. @@ -336,7 +373,7 @@ } void stm_stop_single_thread(void) -{ +{ /* push gc roots! 
*/ /* Warning, may block waiting for rwlock_in_transaction while another thread runs a major GC */ assert(in_single_thread == thread_descriptor); diff --git a/c4/stmsync.h b/c4/stmsync.h --- a/c4/stmsync.h +++ b/c4/stmsync.h @@ -6,11 +6,14 @@ void stm_start_sharedlock(void); void stm_stop_sharedlock(void); +void stm_stop_all_other_threads(void); +void stm_partial_commit_and_resume_other_threads(void); void stm_start_single_thread(void); void stm_stop_single_thread(void); void stm_possible_safe_point(void); +extern struct tx_descriptor *in_single_thread; extern struct GcPtrList stm_prebuilt_gcroots; void stm_add_prebuilt_root(gcptr); void stm_clear_between_tests(void); From noreply at buildbot.pypy.org Mon Nov 4 13:20:32 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 4 Nov 2013 13:20:32 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: import stmgc with stop_all_other_threads() and partial commit for inevitable transactions Message-ID: <20131104122032.ED01D1C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67829:7384b3c8c0fc Date: 2013-11-04 13:16 +0100 http://bitbucket.org/pypy/pypy/changeset/7384b3c8c0fc/ Log: import stmgc with stop_all_other_threads() and partial commit for inevitable transactions diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -1072,11 +1072,12 @@ return d->atomic; } -static void init_transaction(struct tx_descriptor *d) +static void init_transaction(struct tx_descriptor *d, int already_locked) { assert(d->atomic == 0); assert(*d->active_ref == 0); - stm_start_sharedlock(); + if (!already_locked) + stm_start_sharedlock(); assert(*d->active_ref == 0); if (clock_gettime(CLOCK_MONOTONIC, &d->start_real_time) < 0) { @@ -1097,7 +1098,7 @@ void stm_begin_transaction(void *buf, void (*longjmp_callback)(void *)) { struct tx_descriptor *d = thread_descriptor; - init_transaction(d); + 
init_transaction(d, 0); *d->active_ref = 1; d->setjmp_buf = buf; d->longjmp_callback = longjmp_callback; @@ -1426,13 +1427,14 @@ dprintf(("private_from_protected: clear (abort)\n")); } -void CommitTransaction(void) +void CommitTransaction(int stay_inevitable) { /* must save roots around this call */ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(*d->active_ref >= 1); assert(d->atomic == 0); - dprintf(("CommitTransaction(%p)\n", d)); + dprintf(("CommitTransaction(%d): %p\n", stay_inevitable, d)); + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1446,7 +1448,11 @@ { stm_fatalerror("global_cur_time modified even though we are inev\n"); } - inev_mutex_release(); + + if (!stay_inevitable) { + /* we simply don't release the mutex. */ + inev_mutex_release(); + } } else { @@ -1504,7 +1510,8 @@ spinlock_release(d->public_descriptor->collection_lock); d->num_commits++; *d->active_ref = 0; - stm_stop_sharedlock(); + if (!stay_inevitable) + stm_stop_sharedlock(); /* clear the list of callbacks that would have been called on abort */ @@ -1569,13 +1576,25 @@ make_inevitable(d); /* cannot abort any more */ } -void BeginInevitableTransaction(void) +void BeginInevitableTransaction(int already_inevitable) { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; revision_t cur_time; - init_transaction(d); - cur_time = acquire_inev_mutex_and_mark_global_cur_time(d); + init_transaction(d, already_inevitable); + + if (already_inevitable) { + cur_time = ACCESS_ONCE(global_cur_time); + assert((cur_time & 1) == 0); + if (!bool_cas(&global_cur_time, cur_time, cur_time + 1)) { + stm_fatalerror("there was a commit between a partial inevitable " + "commit and the continuation of the transaction\n"); + } + } + else { + cur_time = acquire_inev_mutex_and_mark_global_cur_time(d); + } + d->start_time = cur_time; 
make_inevitable(d); } diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -124,7 +124,8 @@ #define ABRT_VALIDATE_INEV 5 #define ABRT_COLLECT_MINOR 6 #define ABRT_COLLECT_MAJOR 7 -#define ABORT_REASONS 8 +#define ABRT_OTHER_THREADS 8 +#define ABORT_REASONS 9 #define ABORT_NAMES { "MANUAL", \ "COMMIT", \ "STOLEN_MODIFIED", \ @@ -133,6 +134,7 @@ "VALIDATE_INEV", \ "COLLECT_MINOR", \ "COLLECT_MAJOR", \ + "OTHER_THREADS", \ } #define SPLP_ABORT 0 @@ -208,8 +210,8 @@ /************************************************************/ -void BeginInevitableTransaction(void); /* must save roots around this call */ -void CommitTransaction(void); /* must save roots around this call */ +void BeginInevitableTransaction(int); /* must save roots around this call */ +void CommitTransaction(int); /* must save roots around this call */ void BecomeInevitable(const char *why); /* must save roots around this call */ void AbortTransaction(int); void AbortTransactionAfterCollect(struct tx_descriptor *, int); diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -1030,8 +1030,14 @@ if (ACCESS_ONCE(countdown_next_major_coll) > 0) return; - stm_start_single_thread(); - + /* in case we run in single_thread mode already and we are the + single thread, we must not try to enter it again. + This can happen after manually entering the mode by calling + stm_stop_all_other_threads(). */ + int single_threaded = in_single_thread == thread_descriptor; + if (!single_threaded) + stm_start_single_thread(); + /* If several threads were blocked on the previous line, the first one to proceed sees 0 in 'countdown_next_major_coll'. It's the thread that will do the major collection. 
Afterwards the other @@ -1040,7 +1046,8 @@ if (countdown_next_major_coll == 0) major_collect(); - stm_stop_single_thread(); + if (!single_threaded) + stm_stop_single_thread(); AbortNowIfDelayed(); } diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -89a1de501060 +79aa5685d286 diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -196,6 +196,14 @@ /* only user currently is stm_allocate_public_integer_address() */ void stm_register_integer_address(intptr_t); +/* enter single-threaded mode. Used e.g. when patching assembler + code that mustn't be executed in another thread while being + patched. This can be used to atomically update non-transactional + memory. + These calls may collect! */ +void stm_stop_all_other_threads(void); +void stm_partial_commit_and_resume_other_threads(void); + /* macro functionality */ extern __thread gcptr *stm_shadowstack; diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -96,7 +96,7 @@ init_shadowstack(); stmgcpage_release_global_lock(); } - BeginInevitableTransaction(); + BeginInevitableTransaction(0); return token; } @@ -106,7 +106,7 @@ if (token == 1) stmgc_minor_collect(); /* force everything out of the nursery */ - CommitTransaction(); + CommitTransaction(0); if (token == 1) { stmgcpage_acquire_global_lock(); @@ -141,7 +141,7 @@ stm_push_root(END_MARKER_OFF); if (!thread_descriptor->atomic) - CommitTransaction(); + CommitTransaction(0); #ifdef _GC_ON_CPYTHON volatile PyThreadState *v_ts = PyGILState_GetThisThreadState(); @@ -193,7 +193,7 @@ assert(stm_shadowstack == v_saved_value + 2); if (!d->atomic) - 
CommitTransaction(); + CommitTransaction(0); counter = 0; } @@ -205,7 +205,7 @@ } } else { - BeginInevitableTransaction(); + BeginInevitableTransaction(0); } gcptr x = stm_pop_root(); /* pop the END_MARKER */ @@ -222,7 +222,7 @@ stm_possible_safe_point(); } else { - CommitTransaction(); + CommitTransaction(0); unsigned long limit = d->reads_size_limit_nonatomic; if (limit != 0 && limit < (stm_regular_length_limit >> 1)) @@ -247,7 +247,7 @@ { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; if (!d->atomic) - CommitTransaction(); + CommitTransaction(0); else BecomeInevitable("stm_commit_transaction but atomic"); } @@ -256,7 +256,7 @@ { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; if (!d->atomic) - BeginInevitableTransaction(); + BeginInevitableTransaction(0); } void stm_become_inevitable(const char *reason) @@ -279,7 +279,7 @@ static pthread_rwlock_t rwlock_shared = PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP; -static struct tx_descriptor *in_single_thread = NULL; /* for debugging */ +struct tx_descriptor *in_single_thread = NULL; void stm_start_sharedlock(void) { @@ -319,8 +319,45 @@ "pthread_rwlock_unlock failure\n"); } + +void stm_stop_all_other_threads(void) +{ /* push gc roots! */ + struct tx_descriptor *d; + + BecomeInevitable("stop_all_other_threads"); + stm_start_single_thread(); + + for (d = stm_tx_head; d; d = d->tx_next) { + if (*d->active_ref == 1) // && d != thread_descriptor) <- TRUE + AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS); + } +} + + +void stm_partial_commit_and_resume_other_threads(void) +{ /* push gc roots! */ + struct tx_descriptor *d = thread_descriptor; + assert(*d->active_ref == 2); + int atomic = d->atomic; + + /* Give up atomicity during commit. This still works because + we keep the inevitable status, thereby being guaranteed to + commit before all others. 
*/ + stm_atomic(-atomic); + + /* Commit and start new inevitable transaction while never + giving up the inevitable status. */ + CommitTransaction(1); /* 1=stay_inevitable! */ + BeginInevitableTransaction(1); + + /* restore atomic-count */ + stm_atomic(atomic); + + stm_stop_single_thread(); +} + void stm_start_single_thread(void) -{ +{ /* push gc roots! */ /* Called by the GC, just after a minor collection, when we need to do a major collection. When it returns, it acquired the "write lock" which prevents any other thread from running in a transaction. @@ -337,7 +374,7 @@ } void stm_stop_single_thread(void) -{ +{ /* push gc roots! */ /* Warning, may block waiting for rwlock_in_transaction while another thread runs a major GC */ assert(in_single_thread == thread_descriptor); diff --git a/rpython/translator/stm/src_stm/stmsync.h b/rpython/translator/stm/src_stm/stmsync.h --- a/rpython/translator/stm/src_stm/stmsync.h +++ b/rpython/translator/stm/src_stm/stmsync.h @@ -7,11 +7,14 @@ void stm_start_sharedlock(void); void stm_stop_sharedlock(void); +void stm_stop_all_other_threads(void); +void stm_partial_commit_and_resume_other_threads(void); void stm_start_single_thread(void); void stm_stop_single_thread(void); void stm_possible_safe_point(void); +extern struct tx_descriptor *in_single_thread; extern struct GcPtrList stm_prebuilt_gcroots; void stm_add_prebuilt_root(gcptr); void stm_clear_between_tests(void); From noreply at buildbot.pypy.org Mon Nov 4 13:20:34 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 4 Nov 2013 13:20:34 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: atomically patch assembler by stopping & aborting all other transactions Message-ID: <20131104122034.6B5831C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67830:d483af75a05b Date: 2013-11-04 13:18 +0100 http://bitbucket.org/pypy/pypy/changeset/d483af75a05b/ Log: atomically patch assembler by stopping & aborting all other transactions during raw 
patching. diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -770,8 +770,6 @@ rawstart) debug_bridge(descr_number, rawstart, codeendpos) self.patch_pending_failure_recoveries(rawstart) - # patch the jump from original guard - self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) @@ -780,6 +778,14 @@ ops_offset=ops_offset, descr=faildescr) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + + if self.cpu.gc_ll_descr.stm: + rstm.stop_all_other_threads() + # patch the jump from original guard after the frame-depth update + self.patch_jump_for_descr(faildescr, rawstart) + if self.cpu.gc_ll_descr.stm: + rstm.partial_commit_and_resume_other_threads() + self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -1082,7 +1088,13 @@ assert mc.get_relative_pos() == 5 else: assert mc.get_relative_pos() <= 13 + # patch assembler: + if self.cpu.gc_ll_descr.stm: + rstm.stop_all_other_threads() mc.copy_to_raw_memory(oldadr) + if self.cpu.gc_ll_descr.stm: + rstm.partial_commit_and_resume_other_threads() + def dump(self, text): if not self.verbose: diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -7,6 +7,7 @@ from rpython.jit.backend.x86.profagent import ProfileAgent from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU from rpython.jit.backend.x86 import regloc +from rpython.rlib import rstm import sys @@ -127,6 +128,9 @@ def invalidate_loop(self, looptoken): from rpython.jit.backend.x86 import codebuf + if self.gc_ll_descr.stm: + rstm.stop_all_other_threads() + for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: mc = 
codebuf.MachineCodeBlockWrapper() mc.JMP_l(tgt) @@ -134,6 +138,9 @@ mc.copy_to_raw_memory(addr - 1) # positions invalidated looptoken.compiled_loop_token.invalidate_positions = [] + + if self.gc_ll_descr.stm: + rstm.partial_commit_and_resume_other_threads() def get_all_loop_runs(self): l = lltype.malloc(LOOP_RUN_CONTAINER, diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -98,6 +98,8 @@ self.pop_roots(hop, livevars) gct_stm_become_inevitable = _gct_with_roots_pushed + gct_stm_stop_all_other_threads = _gct_with_roots_pushed + gct_stm_partial_commit_and_resume_other_threads = _gct_with_roots_pushed gct_stm_perform_transaction = _gct_with_roots_pushed gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -34,6 +34,14 @@ llop.stm_become_inevitable(lltype.Void) @dont_look_inside +def stop_all_other_threads(): + llop.stm_stop_all_other_threads(lltype.Void) + + at dont_look_inside +def partial_commit_and_resume_other_threads(): + llop.stm_partial_commit_and_resume_other_threads(lltype.Void) + + at dont_look_inside def should_break_transaction(): return we_are_translated() and ( llop.stm_should_break_transaction(lltype.Bool)) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -977,6 +977,8 @@ op_stm_major_collect = _stm_not_implemented op_stm_abort_and_retry = _stm_not_implemented op_stm_become_inevitable = _stm_not_implemented + op_stm_stop_all_other_threads = _stm_not_implemented + op_stm_partial_commit_and_resume_other_threads = _stm_not_implemented # __________________________________________________________ # operations on addresses diff --git a/rpython/rtyper/lltypesystem/lloperation.py 
b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -416,6 +416,8 @@ 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), 'stm_become_inevitable': LLOp(canmallocgc=True), + 'stm_stop_all_other_threads': LLOp(canmallocgc=True), + 'stm_partial_commit_and_resume_other_threads': LLOp(canmallocgc=True), 'stm_minor_collect': LLOp(canmallocgc=True), 'stm_major_collect': LLOp(canmallocgc=True), 'stm_get_tid': LLOp(canfold=True), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -586,6 +586,8 @@ OP_STM_INITIALIZE = _OP_STM OP_STM_FINALIZE = _OP_STM OP_STM_BECOME_INEVITABLE = _OP_STM + OP_STM_STOP_ALL_OTHER_THREADS = _OP_STM + OP_STM_PARTIAL_COMMIT_AND_RESUME_OTHER_THREADS = _OP_STM OP_STM_BARRIER = _OP_STM OP_STM_PTR_EQ = _OP_STM OP_STM_PUSH_ROOT = _OP_STM diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -103,6 +103,12 @@ string_literal = c_string_constant(info) return 'stm_become_inevitable(%s);' % (string_literal,) +def stm_stop_all_other_threads(funcgen, op): + return 'stm_stop_all_other_threads();' + +def stm_partial_commit_and_resume_other_threads(funcgen, op): + return 'stm_partial_commit_and_resume_other_threads();' + def stm_push_root(funcgen, op): arg0 = funcgen.expr(op.args[0]) return 'stm_push_root((gcptr)%s);' % (arg0,) From noreply at buildbot.pypy.org Mon Nov 4 16:37:42 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 16:37:42 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: merge default into branch Message-ID: <20131104153742.CB2B41C13E5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r871:c4230f19bdf0 
Date: 2013-11-04 09:51 +0200 http://bitbucket.org/pypy/buildbot/changeset/c4230f19bdf0/ Log: merge default into branch diff too long, truncating to 2000 out of 2554 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -34,3 +34,5 @@ *-win-32 *-win-x86-32 *-win-x86-64 +slave/pypy-buildbot +master/pypy-buildbot diff --git a/README b/README --- a/README +++ b/README @@ -1,7 +1,6 @@ .. -*- mode: rst -*- -Everything has been tested with builbot 0.7.12. Not sure what happens with -other versions :-) +Everything has been tested with builbot 0.8.8. How to hack the PyPy buildbot ============================== @@ -24,12 +23,12 @@ If you want to run buildbot in production, you need to make sure that the function ``pypybuildbot.util.we_are_debugging`` returns ``False`` in your environment. At the moment of writing, debugging is enabled everywhere but on -wyvern. +cobra. You still need to fill ``master/slaveinfo.py`` with the passwords of the various slaves you want to use. -Then, to start the buildbot master: ``cd master; make start`` +Then, to start the buildbot master: ``buildbot start `` To restart the buildmaster @@ -43,13 +42,13 @@ $ buildbot checkconfig -$ make reconfig +$ buildbot reconfig OR -$ make stop +$ buildbot stop -$ make start +$ buildbot start To run a buildslave =================== diff --git a/bbhook/main.py b/bbhook/main.py --- a/bbhook/main.py +++ b/bbhook/main.py @@ -38,8 +38,9 @@ @app.route('/', methods=['POST']) def handle_payload(): - payload = json.loads(flask.request.form['payload']) + open('/tmp/payload', 'w').write(flask.request.form['payload']) try: + payload = json.loads(flask.request.form['payload']) from . 
import hook hook.handle(payload, test=app.testing) except: diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -56,7 +56,8 @@ + crosstranslationjitargs), platform='linux-armhf-raring', interpreter='pypy', - prefix=['schroot', '-c', 'raring']) + prefix=['schroot', '-c', 'raring'], + trigger='JITLINUXARMHF_RARING_scheduler') pypyARMJITTranslatedTestFactory = pypybuilds.TranslatedTests( translationArgs=(crosstranslationargs @@ -89,7 +90,17 @@ app_tests=True, platform='linux-armhf-raspbian', ) +pypyARMHF_RARING_JITTranslatedTestFactory = pypybuilds.TranslatedTests( + translationArgs=(crosstranslationargs + + jit_translation_args + + crosstranslationjitargs), + lib_python=True, + pypyjit=True, + app_tests=True, + platform='linux-armhf-raring', + ) # +LINUXARMHF = "own-linux-armhf" APPLVLLINUXARM = "pypy-c-app-level-linux-armel" APPLVLLINUXARMHF_v7 = "pypy-c-app-level-linux-armhf-v7" APPLVLLINUXARMHF_RASPBIAN = "pypy-c-app-level-linux-armhf-raspbian" @@ -97,6 +108,7 @@ JITLINUXARM = "pypy-c-jit-linux-armel" JITLINUXARMHF_v7 = "pypy-c-jit-linux-armhf-v7" JITLINUXARMHF_RASPBIAN = "pypy-c-jit-linux-armhf-raspbian" +JITLINUXARMHF_RARING = "pypy-c-jit-linux-armhf-raring" JITBACKENDONLYLINUXARMEL = "jitbackendonly-own-linux-armel" JITBACKENDONLYLINUXARMHF = "jitbackendonly-own-linux-armhf" @@ -109,6 +121,22 @@ BUILDJITLINUXARMHF_RASPBIAN = "build-pypy-c-jit-linux-armhf-raspbian" BUILDJITLINUXARMHF_RARING = "build-pypy-c-jit-linux-armhf-raring" +builderNames = [ + LINUXARMHF, + APPLVLLINUXARM, + APPLVLLINUXARMHF_v7, + APPLVLLINUXARMHF_RASPBIAN, + JITLINUXARM, + JITLINUXARMHF_v7, + JITLINUXARMHF_RASPBIAN, + JITBACKENDONLYLINUXARMEL, + JITBACKENDONLYLINUXARMHF, + JITBACKENDONLYLINUXARMHF_v7, + BUILDLINUXARM, + BUILDJITLINUXARM, + BUILDLINUXARMHF_RASPBIAN, + BUILDJITLINUXARMHF_RASPBIAN, +] schedulers = [ Nightly("nighly-arm-0-00", [ @@ -119,6 +147,8 @@ BUILDLINUXARM, # on 
hhu-cross-armel, uses 1 core BUILDLINUXARMHF_RASPBIAN, # on hhu-cross-raspbianhf, uses 1 core + LINUXARMHF, # onw tests on greenbox3-node0 + JITBACKENDONLYLINUXARMEL, # on hhu-imx.53 JITBACKENDONLYLINUXARMHF, JITBACKENDONLYLINUXARMHF_v7, # on cubieboard-bob @@ -140,6 +170,10 @@ JITLINUXARMHF_RASPBIAN, # triggered by BUILDJITLINUXARMHF_RASPBIAN JITLINUXARMHF_v7, # triggered by BUILDJITLINUXARMHF_RASPBIAN, on cubieboard-bob ]), + + Triggerable("JITLINUXARMHF_RARING_scheduler", [ + JITLINUXARMHF_RARING, # triggered by BUILDJITLINUXARMHF_RARING + ]) ] builders = [ @@ -163,6 +197,12 @@ "locks": [ARMBoardLock.access('counting')], }, ## armv7 + {"name": LINUXARMHF, + "slavenames": ["greenbox3-node0"], + "builddir": LINUXARMHF, + "factory": pypyOwnTestFactoryARM, + "category": 'linux-armhf', + }, {"name": JITBACKENDONLYLINUXARMHF_v7, "slavenames": ['cubieboard-bob'], "builddir": JITBACKENDONLYLINUXARMHF_v7, @@ -216,6 +256,12 @@ 'category': 'linux-armhf', "locks": [ARMBoardLock.access('counting')], }, + {"name": JITLINUXARMHF_RARING, + "slavenames": ["greenbox3-node0"], + 'builddir': JITLINUXARMHF_RARING, + 'factory': pypyARMHF_RARING_JITTranslatedTestFactory, + 'category': 'linux-armhf', + }, # Translation Builders for ARM {"name": BUILDLINUXARM, "slavenames": ['hhu-cross-armel'], diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -1,9 +1,12 @@ +from buildbot.steps.source.mercurial import Mercurial +from buildbot.process.buildstep import BuildStep from buildbot.process import factory from buildbot.steps import shell, transfer from buildbot.steps.trigger import Trigger from buildbot.process.properties import WithProperties from buildbot import locks from pypybuildbot.util import symlink_force +from buildbot.status.results import SKIPPED, SUCCESS import os # buildbot supports SlaveLocks, which can be used to limit the amout of builds @@ -27,10 +30,7 @@ # while the boards can only run 
one job at the same time ARMBoardLock = locks.SlaveLock('arm_boards', maxCount=1) - -# XXX monkey patch Trigger class, there are to issues with the list of renderables -# original: Trigger.renderables = [ 'set_propetries', 'scheduler', 'sourceStamp' ] -Trigger.renderables = [ 'set_properties', 'schedulerNames', 'sourceStamp' ] +map_branch_name = lambda x: x if x not in ['', None, 'default'] else 'trunk' class ShellCmd(shell.ShellCommand): # our own version that can distinguish abort cases (rc == -1) @@ -47,9 +47,7 @@ def start(self): properties = self.build.getProperties() - branch = properties['branch'] - if branch is None: - branch = 'trunk' + branch = map_branch_name(properties['branch']) #masterdest = properties.render(self.masterdest) masterdest = os.path.expanduser(self.masterdest) if branch.startswith('/'): @@ -86,21 +84,18 @@ def start(self): properties = self.build.getProperties() - branch = properties['branch'] - revision = properties['revision'] - - if branch is None: - branch = 'trunk' + branch = map_branch_name(properties['branch']) + revision = properties['final_file_name'] mastersrc = os.path.expanduser(self.mastersrc) if branch.startswith('/'): branch = branch[1:] mastersrc = os.path.join(mastersrc, branch) - if revision is not None: + if revision: basename = WithProperties(self.basename).getRenderingFor(self.build) basename = basename.replace(':', '-') else: - basename = self.basename.replace('%(revision)s', 'latest') + basename = self.basename.replace('%(final_file_name)s', 'latest') assert '%' not in basename self.mastersrc = os.path.join(mastersrc, basename) @@ -161,9 +156,7 @@ builder.summary_by_branch_and_revision = {} try: rev = properties['got_revision'] - branch = properties['branch'] - if branch is None: - branch = 'trunk' + branch = map_branch_name(properties['branch']) if branch.endswith('/'): branch = branch[:-1] except KeyError: @@ -177,31 +170,21 @@ builder.saveYourself() # 
_______________________________________________________________ - -class UpdateCheckout(ShellCmd): - description = 'hg update' - command = 'UNKNOWN' - - def __init__(self, workdir=None, haltOnFailure=True, force_branch=None, - **kwargs): - ShellCmd.__init__(self, workdir=workdir, haltOnFailure=haltOnFailure, - **kwargs) - self.force_branch = force_branch - self.addFactoryArguments(force_branch=force_branch) - - def start(self): - if self.force_branch is not None: - branch = self.force_branch - # Note: We could add a warning to the output if we - # ignore the branch set by the user. - else: - properties = self.build.getProperties() - branch = properties['branch'] or 'default' - command = ["hg", "update", "--clean", "-r", branch] - self.setCommand(command) - ShellCmd.start(self) - - +# XXX Currently the build properties got_revision and final_file_name contain +# the revision number and the changeset-id, CheckGotRevision takes care to set +# the corresponding build properties +# rev:changeset for got_revision +# rev-changeset for final_file_name +# +# The rev part of got_revision and filename is used everywhere to sort the +# builds, i.e. on the summary and download pages. +# +# The rev part is strictly local and needs to be removed from the SourceStamp, +# at least for decoupled builds, which is what ParseRevision does. 
+# +# XXX in general it would be nice to drop the revision-number using only the +# changeset-id for got_revision and final_file_name and sorting the builds +# chronologically class UpdateGitCheckout(ShellCmd): description = 'git checkout' command = 'UNKNOWN' @@ -244,12 +227,51 @@ # ':' should not be part of filenames --- too many issues self.build.setProperty('got_revision', got_revision, 'got_revision') - self.build.setProperty('final_file_name', final_file_name, - 'got_revision') + if not self.build.hasProperty('final_file_name'): + self.build.setProperty('final_file_name', final_file_name, + 'got_revision') +class ParseRevision(BuildStep): + """Parse the revision property of the source stamp and extract the global + part of the revision + 123:3a34 -> 3a34""" + name = "parse_revision" -def update_hg(platform, factory, repourl, workdir, use_branch, - force_branch=None): + def __init__(self, *args, **kwargs): + BuildStep.__init__(self, *args, **kwargs) + + @staticmethod + def hideStepIf(results, step): + return results==SKIPPED + + @staticmethod + def doStepIf(step): + revision = step.build.getSourceStamp().revision + return isinstance(revision, (unicode, str)) and ':' in revision + + def start(self): + stamp = self.build.getSourceStamp() + revision = stamp.revision if stamp.revision is not None else '' + # + if not isinstance(revision, (unicode, str)) or ":" not in revision: + self.finished(SKIPPED) + return + # + self.build.setProperty('original_revision', revision, 'parse_revision') + self.build.setProperty('final_file_name', + revision.replace(':', '-'), 'parse_revision') + # + parts = revision.split(':') + self.build.setProperty('revision', parts[1], 'parse_revision') + stamp.revision = parts[1] + self.finished(SUCCESS) + + +def update_hg_old_method(platform, factory, repourl, workdir): + # baaaaaah. Seems that the Mercurial class doesn't support + # updating to a different branch than the one specified by + # the user (like "default"). 
This is nonsense if we need + # an auxiliary check-out :-( At least I didn't find how. if platform == 'win32': command = "if not exist .hg rmdir /q /s ." else: @@ -280,14 +302,27 @@ command="hg pull", workdir=workdir)) # - if use_branch or force_branch: - factory.addStep(UpdateCheckout(workdir=workdir, - haltOnFailure=True, - force_branch=force_branch)) - else: - factory.addStep(ShellCmd(description="hg update", - command=WithProperties("hg update --clean %(revision)s"), - workdir=workdir)) + # here, update without caring about branches + factory.addStep(ShellCmd(description="hg update", + command=WithProperties("hg update --clean %(revision)s"), + workdir=workdir)) + +def update_hg(platform, factory, repourl, workdir, use_branch, + force_branch=None): + if not use_branch: + assert force_branch is None + update_hg_old_method(platform, factory, repourl, workdir) + return + factory.addStep( + Mercurial( + repourl=repourl, + mode='full', + method='fresh', + defaultBranch=force_branch, + branchType='inrepo', + clobberOnBranchChange=False, + workdir=workdir, + logEnviron=False)) def update_git(platform, factory, repourl, workdir, use_branch, force_branch=None): @@ -340,11 +375,15 @@ # for debugging repourl = '/home/antocuni/pypy/default' # + factory.addStep(ParseRevision(hideStepIf=ParseRevision.hideStepIf, + doStepIf=ParseRevision.doStepIf)) + # update_hg(platform, factory, repourl, workdir, use_branch=True, force_branch=force_branch) # factory.addStep(CheckGotRevision(workdir=workdir)) + def build_name(platform, jit=False, flags=[], placeholder=None): if placeholder is None: placeholder = '%(final_file_name)s' @@ -524,7 +563,7 @@ command=['rm', '-rf', 'pypy-c'], workdir='.')) extension = get_extension(platform) - name = build_name(platform, pypyjit, translationArgs, placeholder='%(revision)s') + extension + name = build_name(platform, pypyjit, translationArgs, placeholder='%(final_file_name)s') + extension self.addStep(PyPyDownload( basename=name, 
mastersrc='~/nightly', @@ -538,22 +577,37 @@ self.addStep(ShellCmd( description="decompress pypy-c", command=['tar', '--extract', '--file=pypy_build'+ extension, '--strip-components=1', '--directory=.'], - workdir='pypy-c')) + workdir='pypy-c', + haltOnFailure=True, + )) + self.addStep(ShellCmd( + description="reset permissions", + command=['chmod', 'u+rw', '-R', 'build/include'], + haltOnFailure=True, + workdir='.')) # copy pypy-c to the expected location within the pypy source checkout self.addStep(ShellCmd( description="move pypy-c", command=['cp', '-v', 'pypy-c/bin/pypy', 'build/pypy/goal/pypy-c'], + haltOnFailure=True, workdir='.')) # copy generated and copied header files to build/include self.addStep(ShellCmd( description="move header files", command=['cp', '-vr', 'pypy-c/include', 'build'], + haltOnFailure=True, workdir='.')) # copy ctypes_resource_cache generated during translation self.addStep(ShellCmd( + description="reset permissions", + command=['chmod', 'u+rw', '-R', 'build/lib_pypy'], + haltOnFailure=True, + workdir='.')) + self.addStep(ShellCmd( description="move ctypes resource cache", command=['cp', '-rv', 'pypy-c/lib_pypy/ctypes_config_cache', 'build/lib_pypy'], + haltOnFailure=True, workdir='.')) add_translated_tests(self, prefix, platform, app_tests, lib_python, pypyjit) @@ -578,6 +632,7 @@ command=prefix + ["python", "pypy/tool/release/package.py", ".", WithProperties(name), 'pypy', '.'], + haltOnFailure=True, workdir='build')) nightly = '~/nightly/' extension = get_extension(platform) @@ -587,7 +642,7 @@ basename=name + extension, workdir='.', blocksize=100 * 1024)) - if trigger: # if provided trigger schedulers that are depend on this one + if trigger: # if provided trigger schedulers that depend on this one self.addStep(Trigger(schedulerNames=[trigger])) @@ -595,10 +650,11 @@ def __init__(self, platform='linux', host='tannit', postfix=''): factory.BuildFactory.__init__(self) - setup_steps(platform, self) # repourl = 
'https://bitbucket.org/pypy/benchmarks' update_hg(platform, self, repourl, 'benchmarks', use_branch=False) + # + setup_steps(platform, self) if host == 'tannit': lock = TannitCPU elif host == 'speed_python': @@ -676,14 +732,14 @@ ''' factory.BuildFactory.__init__(self) + # check out and update benchmarks + repourl = 'https://bitbucket.org/pypy/benchmarks' + update_hg(platform, self, repourl, 'benchmarks', use_branch=False) + # checks out and updates the repo setup_steps(platform, self, repourl='http://hg.python.org/cpython', force_branch=branch) - # check out and update benchmarks - repourl = 'https://bitbucket.org/pypy/benchmarks' - update_hg(platform, self, repourl, 'benchmarks', use_branch=False) - lock = SpeedPythonCPU self.addStep(ShellCmd( @@ -736,6 +792,39 @@ masterdest=WithProperties(resultfile), workdir=".")) +class PyPyBuildbotTestFactory(factory.BuildFactory): + def __init__(self): + factory.BuildFactory.__init__(self) + # clone + self.addStep( + Mercurial( + repourl='https://bitbucket.org/pypy/buildbot', + mode='incremental', + method='fresh', + defaultBranch='default', + branchType='inrepo', + clobberOnBranchChange=False, + logEnviron=False)) + # create a virtualenv + self.addStep(ShellCmd( + description='create virtualenv', + haltOnFailure=True, + command='virtualenv ../venv')) + # install deps + self.addStep(ShellCmd( + description="install dependencies", + haltOnFailure=True, + command=('../venv/bin/pip install -r requirements.txt').split())) + # run tests + self.addStep(PytestCmd( + description="pytest buildbot", + haltOnFailure=True, + command=["../venv/bin/py.test", + "--resultlog=testrun.log", + ], + logfiles={'pytestLog': 'testrun.log'})) + + class NativeNumpyTests(factory.BuildFactory): ''' Download a pypy nightly, install nose and numpy, and run the numpy test suite diff --git a/bot2/pypybuildbot/ircbot.py b/bot2/pypybuildbot/ircbot.py --- a/bot2/pypybuildbot/ircbot.py +++ b/bot2/pypybuildbot/ircbot.py @@ -7,54 +7,64 @@ the customized IRC 
messages. """ -import re -from buildbot.status.words import Contact, IRC, log +from buildbot.status.words import IRC, log, IRCContact +# see http://www.mirc.com/colors.html USE_COLOR_CODES = True -GREEN = '\x033' -RED = '\x034' -AZURE = '\x0311' -BLUE = '\x0312' -PURPLE = '\x0313' -GRAY = '\x0315' -BOLD = '\x02' -def color(code, s): +BOLD = '\x02' +COLORS = { + 'WHITE': '\x030', + 'BLACK': '\x031', + 'GREEN': '\x033', + 'RED': '\x034', + 'AZURE': '\x0311', + 'BLUE': '\x0312', + 'PURPLE': '\x0313', + 'GRAY': '\x0315', +} + + +def color(s, code=None, bold=False): if USE_COLOR_CODES: - return '%s%s\x0F' % (code, s) + c = BOLD if bold else '' + if code in COLORS: + c += COLORS[code] + return '%s%s\x0F' % (c, s) return s -def extract_username(build): - regexp = r"The web-page 'force build' button was pressed by '(.*)': .*" - match = re.match(regexp, build.getReason()) - if match: - return match.group(1) - return None + +def get_build_information(build): + owner = build.getProperty("owner") + reason = build.getProperty("reason") + return ": ".join(k for k in (owner, reason) if k) def get_description_for_build(url, build): - url = color(GRAY, url) # in gray + url = color(url, 'GRAY') # in gray infos = [] - username = extract_username(build) - if username: - infos.append(color(BLUE, username)) # in blue + buildinfo = get_build_information(build) + if buildinfo: + infos.append(color(buildinfo, 'BLUE')) # in blue # - branch = build.source.branch + branch = build.getProperty('branch') if branch: - infos.append(color(BOLD, branch)) # in bold + infos.append(color(branch, bold=True)) # in bold # if infos: return '%s [%s]' % (url, ', '.join(infos)) else: return url + def buildStarted(self, builderName, build): builder = build.getBuilder() - log.msg('[Contact] Builder %r in category %s started' % (builder, builder.category)) + log.msg('[Contact] Builder %r in category %s started' % + (builder, builder.category)) # only notify about builders we are interested in - if 
(self.channel.categories != None and - builder.category not in self.channel.categories): + if (self.bot.categories is not None and + builder.category not in self.bot.categories): log.msg('Not notifying for a build in the wrong category') return @@ -62,7 +72,7 @@ log.msg('Not notifying for a build when started-notification disabled') return - buildurl = self.channel.status.getURLForThing(build) + buildurl = self.bot.status.getURLForThing(build) descr = get_description_for_build(buildurl, build) msg = "Started: %s" % descr self.send(msg) @@ -72,29 +82,28 @@ builder = build.getBuilder() # only notify about builders we are interested in - log.msg('[Contact] builder %r in category %s finished' % (builder, builder.category)) + log.msg('[Contact] builder %r in category %s finished' % + (builder, builder.category)) - if (self.channel.categories != None and - builder.category not in self.channel.categories): + if (self.bot.categories is not None and + builder.category not in self.bot.categories): return if not self.notify_for_finished(build): return - buildurl = self.channel.status.getURLForThing(build) + buildurl = self.bot.status.getURLForThing(build) descr = get_description_for_build(buildurl, build) - result = self.results_descriptions.get(build.getResults(), "Finished ??") - if result == 'Success': - result = color(BOLD+GREEN, result) - elif result == 'Exception': - result = color(BOLD+PURPLE, result) - else: - result = color(BOLD+RED, result) + result, c = self.results_descriptions.get(build.getResults(), + ("Finished ??", 'RED')) + if c not in COLORS: + c = 'RED' + result = color(result, c, bold=True) msg = "%s: %s" % (result, descr) self.send(msg) -Contact.buildStarted = buildStarted -Contact.buildFinished = buildFinished +IRCContact.buildStarted = buildStarted +IRCContact.buildFinished = buildFinished ## def send_message(message, test=False): diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ 
b/bot2/pypybuildbot/master.py @@ -1,25 +1,22 @@ import os -import getpass -from buildbot.scheduler import Nightly, Triggerable +from buildbot.scheduler import Nightly +from buildbot.schedulers.forcesched import ForceScheduler +from buildbot.schedulers.forcesched import ValidationError from buildbot.buildslave import BuildSlave from buildbot.status.html import WebStatus -from buildbot.process.builder import Builder #from buildbot import manhole from pypybuildbot.pypylist import PyPyList, NumpyStatusList -from pypybuildbot.ircbot import IRC # side effects +from pypybuildbot.ircbot import IRC # side effects from pypybuildbot.util import we_are_debugging # Forbid "force build" with empty user name -from buildbot.status.web.builder import StatusResourceBuilder -def my_force(self, req, *args, **kwds): - name = req.args.get("username", [""])[0] - assert name, "Please write your name in the corresponding field." - return _previous_force(self, req, *args, **kwds) -_previous_force = StatusResourceBuilder.force -if _previous_force.__name__ == 'force': - StatusResourceBuilder.force = my_force -# Done +class CustomForceScheduler(ForceScheduler): + def force(self, owner, builder_name, **kwargs): + if not owner: + raise ValidationError, "Please write your name in the corresponding field." 
+ return ForceScheduler.force(self, owner, builder_name, **kwargs) + if we_are_debugging(): channel = '#buildbot-test' @@ -172,6 +169,7 @@ JITLINUX32 = "pypy-c-jit-linux-x86-32" JITLINUX64 = "pypy-c-jit-linux-x86-64" JITMACOSX64 = "pypy-c-jit-macosx-x86-64" +JITMACOSX64_2 = "pypy-c-jit-macosx-x86-64-2" JITWIN32 = "pypy-c-jit-win-x86-32" JITWIN64 = "pypy-c-jit-win-x86-64" JITFREEBSD764 = 'pypy-c-jit-freebsd-7-x86-64' @@ -185,6 +183,8 @@ JITBENCH64_2 = 'jit-benchmark-linux-x86-64-2' CPYTHON_64 = "cpython-2-benchmark-x86-64" NUMPY_64 = "numpy-compatability-linux-x86-64" +# buildbot builder +PYPYBUILDBOT = 'pypy-buildbot' extra_opts = {'xerxes': {'keepalive_interval': 15}, 'aurora': {'max_builds': 1}, @@ -218,12 +218,14 @@ JITFREEBSD864, # on ananke JITFREEBSD964, # on exarkun's freebsd JITMACOSX64, # on xerxes - ], branch=None, hour=0, minute=0), + # buildbot selftest + PYPYBUILDBOT # on cobra + ], branch='default', hour=0, minute=0), Nightly("nightly-2-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - ], branch=None, hour=2, minute=0), + ], branch='default', hour=2, minute=0), Nightly("nightly-2-00-py3k", [ LINUX64, # on allegro64, uses all cores @@ -233,6 +235,38 @@ Nightly("nighly-ppc", [ JITONLYLINUXPPC64, # on gcc1 ], branch='ppc-jit-backend', hour=1, minute=0), + CustomForceScheduler('Force Scheduler', + builderNames=[ + PYPYBUILDBOT, + LINUX32, + LINUX64, + INDIANA32, + + MACOSX32, + WIN32, + WIN64, + APPLVLLINUX32, + APPLVLLINUX64, + APPLVLWIN32, + + LIBPYTHON_LINUX32, + LIBPYTHON_LINUX64, + + JITLINUX32, + JITLINUX64, + JITMACOSX64, + JITMACOSX64_2, + JITWIN32, + JITWIN64, + JITFREEBSD764, + JITFREEBSD864, + JITFREEBSD964, + JITINDIANA32, + + JITONLYLINUXPPC64, + JITBENCH, + JITBENCH64, + ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, 'status': [status, ircbot], @@ -322,11 +356,17 @@ "category": 'mac32' }, {"name" : JITMACOSX64, - "slavenames": ["xerxes"], + 
"slavenames": ["xerxes", "tosh"], 'builddir' : JITMACOSX64, 'factory' : pypyJITTranslatedTestFactoryOSX64, 'category' : 'mac64', }, + {"name" : JITMACOSX64_2, + "slavenames": ["xerxes", "tosh"], + 'builddir' : JITMACOSX64_2, + 'factory' : pypyJITTranslatedTestFactoryOSX64, + 'category' : 'mac64', + }, {"name": WIN32, "slavenames": ["aurora", "SalsaSalsa"], "builddir": WIN32, @@ -401,6 +441,13 @@ 'factory': pypyNumpyCompatability, 'category': 'numpy', }, + {'name': PYPYBUILDBOT, + 'slavenames': ['cobra'], + 'builddir': PYPYBUILDBOT, + 'factory': pypybuilds.PyPyBuildbotTestFactory(), + 'category': 'buildbot', + } + ] + ARM.builders, # http://readthedocs.org/docs/buildbot/en/latest/tour.html#debugging-with-manhole diff --git a/bot2/pypybuildbot/summary.py b/bot2/pypybuildbot/summary.py --- a/bot2/pypybuildbot/summary.py +++ b/bot2/pypybuildbot/summary.py @@ -374,7 +374,7 @@ def _start_cat_branch(self, cat_branch, fine=False): category, branch = cat_branch - branch = trunk_name(branch) + branch = meta_branch_name(branch) category = category_name(category) self.cur_cat_branch = (category, branch) @@ -615,14 +615,19 @@ return lambda v: v in membs def make_subst(v1, v2): + if not isinstance(v1, list): + v1 = [v1] def subst(v): - if v == v1: + if v in v1: return v2 return v return subst -trunk_name = make_subst(None, "") -trunk_value = make_subst("", None) +# Map certain branch names from SourceStamps to a common name shown on the page +meta_branch_name = make_subst(['default', '', None], '') +# map the meta-branch to the actual branch entries from the +# SourceStamp +default_value = make_subst('', ['default', '', None]) category_name = make_subst(None, '-') nocat_value = make_subst("-", None) @@ -661,8 +666,7 @@ def getTitle(self, request): status = self.getStatus(request) - return "%s: summaries of last %d revisions" % (status.getProjectName(), - N) + return "%s: summaries of last %d revisions" % (status.getTitle(), N) @staticmethod def _prune_runs(runs, cutnum): @@ 
-686,8 +690,10 @@ except KeyError: pass builder = status.botmaster.builders[builderName] + factory = builder.config.factory branch = None - for _, kw in builder.buildFactory.steps: + for step in factory.steps: + kw = step.kwargs if 'defaultBranch' in kw: if kw.get('explicitBranch'): branch = kw['defaultBranch'] @@ -722,7 +728,6 @@ only_builder or only_branches) cat_branches = {} - for builderName in status.getBuilderNames(only_categories): if not test_builder(builderName): continue @@ -740,6 +745,8 @@ for build in builditer: if prune_old and self._age(build) > 7: continue + if self._age(build) > 60: # two months old: prune anyway + continue branch = self._get_branch(status, build) if not test_branch(branch): continue @@ -747,6 +754,7 @@ if not test_rev(got_rev): continue + branch = meta_branch_name(branch) cat_branch = (builderStatus.category, branch) runs, no_revision_builds = cat_branches.setdefault(cat_branch, @@ -825,7 +833,13 @@ only_branches = request.args.get('branch', None) only_recentrevs = request.args.get('recentrev', None) if only_branches is not None: - only_branches = map(trunk_value, only_branches) + branches = [] + for x in map(default_value, only_branches): + if isinstance(x, str): + branches.append(x) + else: + branches.extend(x) + only_branches = branches only_builder = request.args.get('builder', None) only_builds = None if only_builder is not None: @@ -861,16 +875,16 @@ outcome_set_cache.stats())) if request.args: - trunk_vs_any_text = "filter nothing" - trunk_vs_any_query = "" + default_vs_any_text = "filter nothing" + default_vs_any_query = "" else: - trunk_vs_any_text = "all " - trunk_vs_any_query = "?branch=" + default_vs_any_text = "all " + default_vs_any_query = "?branch=" - trunk_vs_any_anchor = html.a(trunk_vs_any_text, + default_vs_any_anchor = html.a(default_vs_any_text, href="/summary%s" % - trunk_vs_any_query, + default_vs_any_query, class_="failSummary trunkVsAny") - trunk_vs_any = html.div(trunk_vs_any_anchor, + default_vs_any = 
html.div(default_vs_any_anchor, style="position: absolute; right: 5%;") - return trunk_vs_any.unicode() + page.render() + return default_vs_any.unicode() + page.render() diff --git a/bot2/pypybuildbot/test/test_builds.py b/bot2/pypybuildbot/test/test_builds.py --- a/bot2/pypybuildbot/test/test_builds.py +++ b/bot2/pypybuildbot/test/test_builds.py @@ -4,53 +4,83 @@ class FakeProperties(object): - def __init__(self): - from buildbot.process.properties import PropertyMap - self.pmap = PropertyMap(self) - + sources = {} + + def __init__(self, properties=None): + if properties is None: + self.properties = {'branch':None, 'got_revision': 123, + 'final_file_name': '123-ea5ca8'} + else: + self.properties = properties + def __getitem__(self, item): - if item == 'branch': - return None - if item == 'got_revision': - return 123 - if item == 'final_file_name': - return '123-ea5ca8' - + return self.properties.get(item) + + def __setitem__(self, name, value): + self.properties[name] = value + def render(self, x): return x +class FakeSourceStamp(object): + def __init__(self, properties=None): + self.properties = properties if properties is not None else {} + + def __getattr__(self, name): + return self.properties.get(name) + + def __setattribute__(self, name, value): + self.properties[name] = value + class FakeBuild(object): slaveEnvironment = None - def __init__(self): - self.properties = FakeProperties() - + def __init__(self, properties=None): + self.properties = FakeProperties(properties) + self.source_stamp = FakeSourceStamp(properties) + def getProperties(self): return self.properties + def setProperty(self, name, value, source): + self.properties[name] = value + self.properties.sources[name] = source + def getSlaveCommandVersion(self, *args): return 3 + def getSourceStamp(self, *args): + return self.source_stamp + class FakeStepStatus(object): def setText(self, *args): pass + def stepFinished(self, results): + self.results = results + + def setHidden(self, *args): + pass + 
class FakeDeferred(object): + def callback(*args): + pass def addCallback(self, *args): return FakeDeferred() def addErrback(self, *args): return FakeDeferred() def test_Translate(): - expected = ['translate.py', '--batch', '-O0', + expected = ['pypy', '../../rpython/bin/rpython', '--batch', '-O0', 'targetpypystandalone', '--no-allworkingmodules'] translateInst = builds.Translate(['-O0'], ['--no-allworkingmodules']) assert translateInst.command[-len(expected):] == expected - translateFactory, kw = translateInst.factory - rebuiltTranslate = translateFactory(**kw) + translateFactory = translateInst._getStepFactory().factory + args = translateInst._getStepFactory().args + rebuiltTranslate = translateFactory(*args) assert rebuiltTranslate.command[-len(expected):] == expected @@ -64,7 +94,8 @@ inst = builds.PyPyUpload(slavesrc='slavesrc', masterdest=str(pth.join('mstr')), basename='base-%(final_file_name)s', workdir='.', blocksize=100) - factory, kw = inst.factory + factory = inst._getStepFactory().factory + kw = inst._getStepFactory().kwargs rebuilt = factory(**kw) rebuilt.build = FakeBuild() rebuilt.step_status = FakeStepStatus() @@ -145,3 +176,36 @@ step.commandComplete(cmd) summary = builder.summary_by_branch_and_revision[('trunk', '123')] assert summary.to_tuple() == (2, 2, 4, 0) + + +class TestParseRevision(object): + + def setup_method(self, mth): + inst = builds.ParseRevision() + factory = inst._getStepFactory().factory + kw = inst._getStepFactory().kwargs + self.rebuilt = factory(**kw) + self.rebuilt.step_status = FakeStepStatus() + self.rebuilt.deferred = FakeDeferred() + + def test_has_revision(self): + self.rebuilt.build = FakeBuild({'revision':u'123:ea5ca8'}) + self.rebuilt.start() + assert self.rebuilt.build.getProperties()['revision'] == 'ea5ca8' + assert self.rebuilt.build.getProperties()['original_revision'] == '123:ea5ca8' + assert self.rebuilt.build.getProperties()['final_file_name'] == '123-ea5ca8' + + def test_no_revision(self): + self.rebuilt.build 
= FakeBuild() + self.rebuilt.start() + assert self.rebuilt.build.getProperties()['revision'] is None + + def test_revision_no_local_part(self): + self.rebuilt.build = FakeBuild({'revision':u'ea5ca8'}) + self.rebuilt.start() + assert self.rebuilt.build.getProperties()['revision'] == 'ea5ca8' + + def test_empty_revision(self): + self.rebuilt.build = FakeBuild({'revision':u''}) + self.rebuilt.start() + assert self.rebuilt.build.getProperties()['revision'] == '' diff --git a/bot2/pypybuildbot/test/test_ircbot.py b/bot2/pypybuildbot/test/test_ircbot.py --- a/bot2/pypybuildbot/test/test_ircbot.py +++ b/bot2/pypybuildbot/test/test_ircbot.py @@ -1,50 +1,48 @@ from pypybuildbot import ircbot + def setup_module(mod): ircbot.USE_COLOR_CODES = False + def teardown_module(mod): ircbot.USE_COLOR_CODES = True + class FakeBuild(object): - def __init__(self, reason=None, source=None): - self.reason = reason - self.source = source + def __init__(self, reason=None, owner=None, branch=None): + self.properties = {'owner': owner, 'branch': branch, 'reason': reason} - def getReason(self): - return self.reason + def getProperty(self, name): + return self.properties.get(name, None) - def getSourceStamp(self): - return self.source -class FakeSource(object): - - def __init__(self, branch): - self.branch = branch - -def test_extract_username(): - a = FakeBuild("The web-page 'force build' button was pressed by 'antocuni': foo") +def test_get_build_information(): + a = FakeBuild(owner='antocuni', + reason="The web-page 'force build' button was pressed") b = FakeBuild("The web-page 'force build' button was ...") - assert ircbot.extract_username(a) == 'antocuni' - assert ircbot.extract_username(b) is None + assert ircbot.get_build_information(a) == \ + "antocuni: The web-page 'force build' button was pressed" + assert ircbot.get_build_information(b) == \ + "The web-page 'force build' button was ..." 
def test_get_description_for_build(): - a = FakeBuild('foobar', source=FakeSource(None)) + a = FakeBuild() msg = ircbot.get_description_for_build("http://myurl", a) assert msg == "http://myurl" - a = FakeBuild("The web-page 'force build' button was pressed by 'antocuni': foo", - source=FakeSource(None)) + a = FakeBuild(owner='antocuni', + reason="The web-page 'force build' button was pressed") msg = ircbot.get_description_for_build("http://myurl", a) - assert msg == "http://myurl [antocuni]" + assert msg == "http://myurl [antocuni: " \ + + "The web-page 'force build' button was pressed]" - a = FakeBuild('foobar', source=FakeSource('mybranch')) + a = FakeBuild(branch='mybranch') msg = ircbot.get_description_for_build("http://myurl", a) assert msg == "http://myurl [mybranch]" - a = FakeBuild("The web-page 'force build' button was pressed by 'antocuni': foo", - source=FakeSource('mybranch')) + a = FakeBuild(owner='antocuni', branch='mybranch') msg = ircbot.get_description_for_build("http://myurl", a) assert msg == "http://myurl [antocuni, mybranch]" diff --git a/bot2/pypybuildbot/test/test_pypylist.py b/bot2/pypybuildbot/test/test_pypylist.py --- a/bot2/pypybuildbot/test/test_pypylist.py +++ b/bot2/pypybuildbot/test/test_pypylist.py @@ -78,18 +78,20 @@ newdir.setmtime(oldtime + ascii * 10) pypylist = PyPyList(tmpdir.strpath) listener = pypylist.directoryListing() - assert listener.dirs == ['trunk', 'mmmm', 'llll', + assert listener.dirs == ['trunk', 'llll', 'kkkk','jjjj','iiii','hhhh','gggg','ffff','eeee', 'dddd','cccc','bbbb','aaaa'] def load_BuildmasterConfig(): import os - from pypybuildbot import summary, builds + from pypybuildbot import summary, builds, arm_master def load(name): if name == 'pypybuildbot.summary': return summary elif name == 'pypybuildbot.builds': return builds + elif name == 'pypybuildbot.arm_master': + return arm_master else: assert False diff --git a/bot2/pypybuildbot/test/test_summary.py b/bot2/pypybuildbot/test/test_summary.py --- 
a/bot2/pypybuildbot/test/test_summary.py +++ b/bot2/pypybuildbot/test/test_summary.py @@ -27,7 +27,7 @@ s a/b.py:test_three S a/c.py:test_four """) - + rev_outcome_set.populate(log) assert rev_outcome_set.skipped == set([("a.b","test_three"), @@ -67,7 +67,7 @@ x a/c.py:test_nine x a/c.py:test_ten """) - + rev_outcome_set.populate(log) sum = rev_outcome_set.get_summary() assert sum.p == 1 @@ -80,7 +80,7 @@ rev_outcome_set = summary.RevisionOutcomeSet('0') log = StringIO("") rev_outcome_set.populate(log) - + def test_populate_longrepr(self): rev_outcome_set = summary.RevisionOutcomeSet('50000') log = StringIO("""F a/b.py:test_one @@ -90,7 +90,7 @@ s a/b.py:test_three some skip """) - + rev_outcome_set.populate(log) assert len(rev_outcome_set.skipped) == 1 @@ -115,7 +115,7 @@ F a/b.py:test_two \xc3\xa5 bar """) - + rev_outcome_set.populate(log) assert len(rev_outcome_set.failed) == 2 @@ -133,7 +133,7 @@ ! ! /a/b/c.py:92 """) - + rev_outcome_set.populate(log) assert rev_outcome_set.failed == set([ @@ -151,12 +151,12 @@ log = StringIO("""x a/b.py EXC """) - + rev_outcome_set.populate(log) assert rev_outcome_set.numxfailed == 1 - - + + def test_absent_outcome(self): rev_outcome_set = summary.RevisionOutcomeSet('50000') @@ -169,7 +169,7 @@ def load(x, y): calls.append(y) return y - + cache._load_outcome_set = load res = cache.get('status', 'a') @@ -183,14 +183,14 @@ cache.get('status', 'b') res = cache.get('status', 'c') assert res == 'c' - + assert calls == ['a', 'b', 'c'] calls = [] res = cache.get('status', 'd') assert res == 'd' assert cache.get('status', 'c') == 'c' - assert cache.get('status', 'b') == 'b' + assert cache.get('status', 'b') == 'b' assert calls == ['d'] res = cache.get('status', 'a') @@ -208,18 +208,18 @@ s a/b.py:test_three x a/b.py:test_four """) - + rev_outcome_set_foo.populate(log) - key_bar = ('bar', 7) + key_bar = ('bar', 7) rev_outcome_set_bar = summary.RevisionOutcomeSet('50000', key_bar) log = StringIO(""". a/b.py:test_one . 
a/b.py:test_two s a/b.py:test_three """) - + rev_outcome_set_bar.populate(log) d = {'foo': rev_outcome_set_foo, @@ -228,7 +228,7 @@ goutcome = summary.GatherOutcomeSet(d) assert goutcome.revision == '50000' - + assert goutcome.failed == set([('foo', 'a.b', 'test_one')]) assert goutcome.skipped == set([('foo', 'a.b', 'test_three'), @@ -273,14 +273,14 @@ assert res == ' ' res = goutcome_top.get_longrepr(('what', 'foo', 'a.b', 'test_one')) - assert res == '' + assert res == '' def test_colsizes(): failed = [('a', 'abc', 'd'), ('ab', 'c', 'xy'), ('ab', '', 'cd')] - + res = summary.colsizes(failed) - + assert res == [2,3,2] def test__prune_runs(): @@ -330,15 +330,15 @@ res = summary.show_elapsed(0.25) assert res == "0.25s" res = summary.show_elapsed(1.0) - assert res == "1.00s" + assert res == "1.00s" res = summary.show_elapsed(1.25) - assert res == "1.25s" + assert res == "1.25s" res = summary.show_elapsed(4.5) assert res == "4.50s" res = summary.show_elapsed(5.25) assert res == "5s" res = summary.show_elapsed(5.5) - assert res == "6s" + assert res == "6s" res = summary.show_elapsed(2*60+30) assert res == "2m30" res = summary.show_elapsed(4*60+30) @@ -348,22 +348,33 @@ res = summary.show_elapsed(61*60) assert res == "1h1" res = summary.show_elapsed(90*60) - assert res == "1h30" + assert res == "1h30" -def _BuilderToStatus(status): - setup = {'name': 'builder', 'builddir': 'BUILDDIR', - 'slavebuilddir': 'SLAVEBUILDDIR', - 'factory': process_factory.BuildFactory() } - return process_builder.Builder(setup, status) +class FakeMasterConfig(object): + buildbotURL = "http://buildbot/" + logCompressionLimit = 0 + def __init__(self, builders=None): + self.builders = builders + + +class FakeBuilderconfig(object): + validNames = 'name factory slavenames builddir slavebuilddir category ' \ + 'nextSlave nextBuild canStartBuild locks env properties ' \ + 'mergeRequests description'.split() + + def __init__(self, **kwargs): + for kw, item in kwargs.iteritems(): + assert kw in 
self.validNames + setattr(self, kw, item) class FakeMaster(object): basedir = None - buildbotURL = "http://buildbot/" def __init__(self, builders): self.botmaster = FakeBotMaster(builders) + self.config = FakeMasterConfig() def subscribeToBuildsetCompletions(self, callback): pass @@ -374,6 +385,7 @@ def subscribeToBuildRequests(self, callback): pass + class FakeBotMaster(object): def __init__(self, builders): @@ -384,19 +396,22 @@ self.builderNames.append(name) self.builders[name] = _BuilderToStatus(builder) + class FakeSite(object): def __init__(self, status): self.buildbot_service = FakeService(status) + class FakeService(object): - + def __init__(self, status): self.status = status def getStatus(self): return self.status + class FakeRequest(object): def __init__(self, builders, args={}): @@ -406,6 +421,14 @@ self.site = FakeSite(status) +def _BuilderToStatus(status): + builder = process_builder.Builder(status.name) + builder.builder_status = status + builder.builder_status.basedir = 'BASEDIR' + builder.config = FakeBuilderconfig(factory=process_factory.BuildFactory()) + return builder + + def witness_cat_branch(summary): ref = [None] recentRuns = summary.recentRuns @@ -414,7 +437,6 @@ ref[0] = cat_branch return cat_branch summary.recentRuns = witness - return lambda: ref[0] class FakeLog(object): @@ -424,7 +446,7 @@ self.step = step self.name = name self.cont = cont - + def getStep(self): return self.step @@ -444,7 +466,7 @@ n = getattr(builder, 'nextBuildNumber', 0) t = 1000 for rev, reslog in builds: - build = status_builder.BuildStatus(builder, n) + build = status_builder.BuildStatus(builder, builder.master, n) build.started = time.time() build.setProperty('got_revision', str(rev), None) step = build.addStepWithName('pytest') @@ -453,16 +475,21 @@ step.started = t step.finished = t + (n+1)*60 t = step.finished + 30 + builder.buildCache.cache[build.number] = build + builder.buildStarted(build) build.buildFinished() - builder.touchBuildCache(build) n += 1 
builder.nextBuildNumber = n - + + +METABRANCH = '' + class TestSummary(object): def setup_method(self, meth): summary.outcome_set_cache.clear() + self.master = FakeMaster([]) def test_sanity(self): s = summary.Summary() @@ -474,79 +501,78 @@ assert cat_branch == {} def test_one_build_no_rev(self): - builder = status_builder.BuilderStatus('builder0') - build = status_builder.BuildStatus(builder, 0) - build.started = time.time() + builder = status_builder.BuilderStatus('builder0', None, self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) + build.buildStarted(builder) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = len(builder.buildCache) + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - assert cat_branch == {(None, None): ({}, [build])} + assert cat_branch == {(None, METABRANCH): ({}, [build])} def test_one_build_no_logs(self): - builder = status_builder.BuilderStatus('builder0') - build = status_builder.BuildStatus(builder, 0) - build.started = time.time() + builder = status_builder.BuilderStatus('builder0', None, self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) + build.started = time.time() build.setProperty('got_revision', '50000', None) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = len(builder.buildCache) + builder.buildCache.cache[build.number] = build + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - - revs = cat_branch[(None, None)][0] + + revs = cat_branch[(None, METABRANCH)][0] assert revs.keys() == ['50000'] - assert '<run>' in out + assert 'success' in out def test_one_build_no_logs_failure(self): - builder = 
status_builder.BuilderStatus('builder0') - build = status_builder.BuildStatus(builder, 0) - build.started = time.time() + builder = status_builder.BuilderStatus('builder0', None, self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) + build.started = time.time() build.setProperty('got_revision', '50000', None) step = build.addStepWithName('step') step.setText(['step', 'borken']) step.stepFinished(summary.FAILURE) step1 = build.addStepWithName('other') step1.setText(['other', 'borken']) - step1.stepFinished(summary.FAILURE) + step1.stepFinished(summary.FAILURE) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = len(builder.buildCache) + builder.buildCache.cache[build.number] = build + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() - res = witness_cat_branch(s) - req = FakeRequest([builder]) - out = s.body(req) - cat_branch = res() - - revs = cat_branch[(None, None)][0] - assert revs.keys() == ['50000'] - - assert 'step borken' in out - assert 'other borken' not in out - - def test_one_build(self): - builder = status_builder.BuilderStatus('builder0') - add_builds(builder, [(60000, "F TEST1\n. b")]) - - s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - revs = cat_branch[(None, None)][0] + revs = cat_branch[(None, METABRANCH)][0] + assert revs.keys() == ['50000'] + + assert 'step borken' in out + assert 'other borken' not in out + + def test_one_build(self): + builder = status_builder.BuilderStatus('builder0', None, self.master, '') + add_builds(builder, [(60000, "F TEST1\n. 
b")]) + + s = summary.Summary() + res = witness_cat_branch(s) + req = FakeRequest([builder]) + out = s.body(req) + cat_branch = res() + + revs = cat_branch[(None, METABRANCH)][0] assert revs.keys() == ['60000'] outcome = revs['60000']['builder0'] assert outcome.revision == '60000' @@ -555,17 +581,17 @@ assert 'TEST1' in out def test_two_builds(self): - builder = status_builder.BuilderStatus('builder0') + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60001', ". TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - revs = cat_branch[(None, None)][0] + revs = cat_branch[(None, METABRANCH)][0] assert sorted(revs.keys()) == ['60000', '60001'] outcome = revs['60000']['builder0'] assert outcome.revision == '60000' @@ -582,20 +608,21 @@ assert 'TEST1' in out assert ':-)' in out - assert '\n - + success' in out - + assert re.search(r'\n - ' + r'\+ success', out) is not None def test_two_builds_samerev(self): - builder = status_builder.BuilderStatus('builder0') + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), - ('60000', "F TEST1\n. b")]) + ('60000', "F TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - revs = cat_branch[(None, None)][0] + revs = cat_branch[(None, METABRANCH)][0] assert sorted(revs.keys()) == ['60000'] outcome = revs['60000']['builder0'] assert outcome.revision == '60000' @@ -604,18 +631,18 @@ assert 'TEST1' in out def test_two_builds_recentrev(self): - builder = status_builder.BuilderStatus('builder0') + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60001', "F TEST1\n. 
b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) req.args = {'recentrev': ['60000']} out = s.body(req) cat_branch = res() - revs = cat_branch[(None, None)][0] + revs = cat_branch[(None, METABRANCH)][0] assert sorted(revs.keys()) == ['60000'] outcome = revs['60000']['builder0'] assert outcome.revision == '60000' @@ -624,19 +651,19 @@ assert 'TEST1' in out def test_many_builds_query_builder(self): - builder = status_builder.BuilderStatus('builder0') + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60000', ". a\n. b"), - ('60001', "F TEST1\n. b")]) + ('60001', "F TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) req.args={'builder': ['builder0']} out = s.body(req) cat_branch = res() - runs = cat_branch[(None, None)][0] + runs = cat_branch[(None, METABRANCH)][0] assert sorted(runs.keys()) == [(0, '60000'), (1, '60000'), (2, '60001')] outcome = runs[(0, '60000')]['builder0'] assert outcome.revision == '60000' @@ -660,20 +687,20 @@ def test_many_builds_query_builder_builds(self): - builder = status_builder.BuilderStatus('builder0') + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60000', ". a\n. b"), - ('60001', "F TEST1\n. b")]) + ('60001', "F TEST1\n. 
b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) req.args={'builder': ['builder0'], 'builds': ['0','2-2', '7']} out = s.body(req) cat_branch = res() - runs = cat_branch[(None, None)][0] + runs = cat_branch[(None, METABRANCH)][0] assert sorted(runs.keys()) == [(0, '60000'), (2, '60001')] outcome = runs[(0, '60000')]['builder0'] assert outcome.revision == '60000' @@ -692,21 +719,21 @@ assert 'TEST1' in out def test_many_pytestLogs(self): - builder = status_builder.BuilderStatus('builder1') - build = status_builder.BuildStatus(builder, 0) + builder = status_builder.BuilderStatus('builder1', '', self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) build.started = time.time() build.setProperty('got_revision', '70000', None) step = build.addStepWithName('pytest') step.logs.extend([FakeLog(step, 'pytestLog', "F TEST1")]) step.setText(["pytest", "failed"]) - step.stepFinished(summary.FAILURE) + step.stepFinished(summary.FAILURE) step2 = build.addStepWithName('pytest2') step2.logs.extend([FakeLog(step, 'pytestLog', ". x\nF TEST2")]) step2.setText(["pytest2", "aborted"]) step2.stepFinished(summary.EXCEPTION) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = 1 + builder.buildCache.cache[build.number] = build + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() req = FakeRequest([builder]) @@ -719,23 +746,23 @@ assert 'pytest2 aborted' in out def test_subtle_failures(self): - builder = status_builder.BuilderStatus('builder1') - build = status_builder.BuildStatus(builder, 0) + builder = status_builder.BuilderStatus('builder1', '', self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) build.started = time.time() build.setProperty('got_revision', '70000', None) - step = build.addStepWithName('pytest') + step = build.addStepWithName('pytest') step.logs.extend([FakeLog(step, 'pytestLog', ". 
TEST1")]) step.setText(["pytest", "failed slave lost"]) - step.stepFinished(summary.FAILURE) + step.stepFinished(summary.FAILURE) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = 1 + builder.buildCache.cache[build.number] = build + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() req = FakeRequest([builder]) out = s.body(req) - assert 'pytest failed slave lost' in out + assert 'pytest failed slave lost' in out def test_category_branch_sorting_key(self): @@ -764,19 +791,16 @@ assert res == (2, '', 2, 'release/1') res = s._cat_branch_key(('', 'what')) - assert res == (2, '', 4, 'what') + assert res == (2, '', 4, 'what') def test_builders_with_categories(self): - builder1 = status_builder.BuilderStatus('builder_foo') - builder1.category = 'foo' - builder2 = status_builder.BuilderStatus('builder_bar') - builder2.category = 'bar' - builder3 = status_builder.BuilderStatus('builder_') - builder3.category = '' + builder1 = status_builder.BuilderStatus('builder_foo', 'foo', self.master, '') + builder2 = status_builder.BuilderStatus('builder_bar', 'bar', self.master, '') + builder3 = status_builder.BuilderStatus('builder_', '', self.master, '') add_builds(builder1, [('60000', "F TEST1\n")]) add_builds(builder2, [('60000', "F TEST2\n")]) - add_builds(builder3, [('60000', "F TEST3\n")]) + add_builds(builder3, [('60000', "F TEST3\n")]) s = summary.Summary(['foo', 'bar']) req = FakeRequest([builder1, builder2, builder3]) @@ -792,7 +816,7 @@ assert "{bar}" in out def test_two_builds_different_rev_digits(self): - builder = status_builder.BuilderStatus('builder0') + builder = status_builder.BuilderStatus('builder0', '', self.master, '') add_builds(builder, [(999, "F TEST1\n. b"), (1000, "F TEST1\n. 
b")]) @@ -806,16 +830,16 @@ assert p999builder0-p999 == p1000builder0-p1000+1 def test_build_times_and_filtering(self): - builder1 = status_builder.BuilderStatus('builder1') - builder2 = status_builder.BuilderStatus('builder2') - + builder1 = status_builder.BuilderStatus('builder1', '', self.master, '') + builder2 = status_builder.BuilderStatus('builder2', '', self.master, '') + add_builds(builder1, [('60000', "F TEST1\n")]) - add_builds(builder2, [('50000', ". TEST2\n")]) + add_builds(builder2, [('50000', ". TEST2\n")]) add_builds(builder2, [('60000', "F TEST2\n")]) builder1.getBuild(0).started = 1228258800 # 3 Dec 2008 builder1.getBuild(0).finished = 1228258800 # 3 Dec 2008 - builder2.getBuild(1).started = 1228431600 # 5 Dec 2008 + builder2.getBuild(1).started = 1228431600 # 5 Dec 2008 builder2.getBuild(1).finished = 1228431600 # 5 Dec 2008 builder2.getBuild(0).started = 1227913200 # 29 Nov 2008 diff --git a/bot2/pypybuildbot/util.py b/bot2/pypybuildbot/util.py --- a/bot2/pypybuildbot/util.py +++ b/bot2/pypybuildbot/util.py @@ -2,7 +2,7 @@ import socket def we_are_debugging(): - return socket.gethostname() not in ("wyvern", "cobra") + return socket.gethostname() != 'cobra' def load(name): mod = __import__(name, {}, {}, ['__all__']) diff --git a/master/public_html/default.css b/master/public_html/default.css --- a/master/public_html/default.css +++ b/master/public_html/default.css @@ -10,6 +10,22 @@ color: #333; } +.auth { +position:absolute; +top:5px; +right:40px; +} + +.alert { + color: #c30000; + background-color: #f2dcdc; + padding: 5px 5px 5px 25px; + margin-bottom: 20px; + border-top:1px solid #ccc; + border-bottom:1px solid #ccc; + border-color: #c30000; + font-size: 20px; +} a:link,a:visited,a:active { color: #444; } @@ -197,14 +213,17 @@ font-weight: normal; padding: 8px 8px 8px 8px; color: #333333; + background-color: #eee; + text-align: left; +} + +td.DevBottom { border-bottom-right-radius: 5px; -webkit-border-bottom-right-radius: 5px; 
-moz-border-radius-bottomright: 5px; border-bottom-left-radius: 5px; -webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px; - background-color: #eee; - text-align: left; } td.Alt { @@ -212,9 +231,9 @@ } .legend { - border-radius: 5px; - -webkit-border-radius: 5px; - -moz-border-radius: 5px; + border-radius: 5px !important; + -webkit-border-radius: 5px !important; + -moz-border-radius: 5px !important; width: 100px; max-width: 100px; text-align: center; @@ -349,6 +368,12 @@ border-color: #A77272; } +.failure-again { + color: #000; + background-color: #eA9; + border-color: #A77272; +} + .warnings { color: #FFFFFF; background-color: #fa3; @@ -379,6 +404,12 @@ border-color: #C5C56D; } +.paused { + color: #FFFFFF; + background-color: #8080FF; + border-color: #dddddd; +} + .offline,td.offline { color: #FFFFFF; background-color: #777777; @@ -534,6 +565,10 @@ display: none; } +pre { + white-space: pre-wrap; +} + /* change comments (use regular colors here) */ pre.comments>a:link,pre.comments>a:visited { color: blue; @@ -542,3 +577,27 @@ pre.comments>a:active { color: purple; } + +form.command_forcebuild { + border-top: 1px solid black; + padding: .5em; + margin: .5em; +} + +form.command_forcebuild > .row { + border-top: 1px dotted gray; + padding: .5em 0; +} + +form.command_forcebuild .force-textarea > .label { + display: block; +} + +form.command_forcebuild .force-nested > .label { + font-weight: bold; + display: list-item; +} + +form.command_forcebuild .force-any .force-text { + display: inline; From noreply at buildbot.pypy.org Mon Nov 4 16:37:43 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 16:37:43 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: downloading works Message-ID: <20131104153743.D49A91C13E5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r872:f78c853a0134 Date: 2013-11-04 14:00 +0200 http://bitbucket.org/pypy/buildbot/changeset/f78c853a0134/ Log: downloading works diff 
--git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -1,4 +1,5 @@ from buildbot.steps.source.mercurial import Mercurial +from buildbot.steps.source.git import Git from buildbot.process.buildstep import BuildStep from buildbot.process import factory from buildbot.steps import shell, transfer @@ -85,7 +86,7 @@ properties = self.build.getProperties() branch = map_branch_name(properties['branch']) - revision = properties['final_file_name'] + revision = properties.getProperty('final_file_name') mastersrc = os.path.expanduser(self.mastersrc) if branch.startswith('/'): @@ -185,6 +186,7 @@ # XXX in general it would be nice to drop the revision-number using only the # changeset-id for got_revision and final_file_name and sorting the builds # chronologically + class UpdateGitCheckout(ShellCmd): description = 'git checkout' command = 'UNKNOWN' @@ -326,45 +328,16 @@ def update_git(platform, factory, repourl, workdir, use_branch, force_branch=None): - if platform == 'win32': - command = "if not exist .git rmdir /q /s ." - else: - command = "if [ ! -d .git ]; then rm -fr * .[a-z]*; fi" - factory.addStep(ShellCmd(description="rmdir?", - command=command, - workdir=workdir, - haltOnFailure=False)) - # - if platform == "win32": - command = "if not exist .git %s" - else: - command = "if [ ! 
-d .git ]; then %s; fi" - command = command % ("git clone " + repourl + " .") - factory.addStep(ShellCmd(description="git clone", - command=command, - workdir=workdir, - timeout=3600, - haltOnFailure=True)) - # - factory.addStep( - ShellCmd(description="git clean", - command="git clean", - workdir=workdir, - haltOnFailure=True)) - # - factory.addStep(ShellCmd(description="git pull", - command="git pull", - workdir=workdir)) - # - if use_branch or force_branch: - factory.addStep(UpdateGitCheckout(workdir=workdir, - haltOnFailure=True, - force_branch=force_branch)) - else: - factory.addStep(ShellCmd(description="git checkout", - command=WithProperties("git checkout -f %(revision)s"), - workdir=workdir)) - + factory.addstep( + Git( + repourl=repourl, + mode='full', + method='fresh', + defaultBranch=force_branch, + branchType='inrepo', + clobberOnBranchChange=False, + workdir=workdir, + logEnviron=False)) def setup_steps(platform, factory, workdir=None, repourl='https://bitbucket.org/pypy/pypy/', @@ -832,22 +805,21 @@ def __init__(self, platform='linux', app_tests=False, lib_python=False, - pypyjit=False, + pypyjit=True, prefix=None, translationArgs=[] ): factory.BuildFactory.__init__(self) - # XXX extend to checkout the specific revision of the build - setup_steps(platform, self) - + self.addStep(ParseRevision(hideStepIf=ParseRevision.hideStepIf, + doStepIf=ParseRevision.doStepIf)) # download corresponding nightly build self.addStep(ShellCmd( description="Clear pypy-c", command=['rm', '-rf', 'pypy-c'], workdir='.')) extension = get_extension(platform) - name = build_name(platform, pypyjit, translationArgs, placeholder='%(revision)s') + extension + name = build_name(platform, pypyjit, translationArgs, placeholder='%(final_file_name)s') + extension self.addStep(PyPyDownload( basename=name, mastersrc='~/nightly', @@ -860,34 +832,32 @@ else: self.addStep(ShellCmd( description="decompress pypy-c", - command=['tar', '--extract', '--file=pypy_build'+ extension, - 
'--strip-components=1', '--directory=.'], - workdir='pypy-c/download')) + command=['tar', '--extract', '--file=pypy_build'+ extension, '--strip-components=1', '--directory=.'], + workdir='pypy-c', + haltOnFailure=True, + )) # virtualenv the download self.addStep(ShellCmd( description="create virtualenv", - command=['virtualenv','-p', 'download/bin/pypy', 'install'], - workdir='pypy-c')) + command=['virtualenv','-p', 'bin/pypy', 'install'], + workdir='pypy-c', + haltOnFailure=True, + )) self.addStep(ShellCmd( description="install nose", command=['install/bin/pip', 'install','nose'], - workdir='pypy-c')) + workdir='pypy-c', + haltOnFailure=True, + )) # obtain a pypy-compatible branch of numpy - numpy_url = 'https://github.com/mattip/numpy' + numpy_url = 'https://www.bitbucket.org/pypy/numpy' numpy_pypy_branch = 'pypy' update_git(platform, self, numpy_url, 'numpy_src', use_branch=True, force_branch=numpy_pypy_branch) - if os.path.exists('pypy_c/download/lib_pypy/numpy.py'): - self.addStep(ShellCmd( - description="delete lib_pypy/numpy.*", - command=['rm', 'download/lib_pypy/numpy.*'], - workdir='pypy-c')) - - self.addStep(ShellCmd( description="install numpy", command=['install/bin/python', 'setup.py','install'], diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -148,7 +148,7 @@ pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', postfix='-64') -pypyNumpyCompatability = pypybuilds.NativeNumpyTests() +pypyNumpyCompatability = pypybuilds.NativeNumpyTests(platform='linux64') # @@ -266,6 +266,7 @@ JITONLYLINUXPPC64, JITBENCH, JITBENCH64, + NUMPY_64, ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, From noreply at buildbot.pypy.org Mon Nov 4 16:37:44 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 16:37:44 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: tests run Message-ID: 
<20131104153744.CEC601C13E5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r873:433f33dd1fda Date: 2013-11-04 17:37 +0200 http://bitbucket.org/pypy/buildbot/changeset/433f33dd1fda/ Log: tests run diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -328,14 +328,11 @@ def update_git(platform, factory, repourl, workdir, use_branch, force_branch=None): - factory.addstep( + factory.addStep( Git( repourl=repourl, mode='full', method='fresh', - defaultBranch=force_branch, - branchType='inrepo', - clobberOnBranchChange=False, workdir=workdir, logEnviron=False)) @@ -840,35 +837,35 @@ # virtualenv the download self.addStep(ShellCmd( description="create virtualenv", - command=['virtualenv','-p', 'bin/pypy', 'install'], - workdir='pypy-c', + command=['virtualenv','-p', 'pypy-c/bin/pypy', 'install'], + workdir='./', haltOnFailure=True, )) self.addStep(ShellCmd( description="install nose", command=['install/bin/pip', 'install','nose'], - workdir='pypy-c', + workdir='./', haltOnFailure=True, )) # obtain a pypy-compatible branch of numpy numpy_url = 'https://www.bitbucket.org/pypy/numpy' - numpy_pypy_branch = 'pypy' + numpy_pypy_branch = 'pypy-compat' update_git(platform, self, numpy_url, 'numpy_src', use_branch=True, force_branch=numpy_pypy_branch) self.addStep(ShellCmd( description="install numpy", - command=['install/bin/python', 'setup.py','install'], + command=['../install/bin/python', 'setup.py','install'], workdir='numpy_src')) self.addStep(ShellCmd( description="test numpy", - command=['install/bin/python', '-c', '"import numpy;numpy.test()"', - '> pytest-numpy.log','2>&1'], - logfiles={'pytestLog': 'pytest-numpy.log'}, + command=['bin/nosetests', 'site-packages/numpy', + ], + #logfiles={'pytestLog': 'pytest-numpy.log'}, timeout=4000, - workdir='numpy_src', + workdir='install', #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is 
set externally? )) From noreply at buildbot.pypy.org Mon Nov 4 18:10:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 4 Nov 2013 18:10:51 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Start adapting the codewriter for the new rlist.py format Message-ID: <20131104171051.5139C1C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67831:dc207f7dffcb Date: 2013-11-04 18:10 +0100 http://bitbucket.org/pypy/pypy/changeset/dc207f7dffcb/ Log: Start adapting the codewriter for the new rlist.py format diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1449,7 +1449,6 @@ descrs = () else: descrs = (self.cpu.arraydescrof(ARRAY), - self.cpu.fielddescrof(LIST, 'length'), self.cpu.fielddescrof(LIST, 'items'), self.cpu.sizeof(LIST)) else: @@ -1487,7 +1486,7 @@ fast = '_fast' in func.func_name return non_negative or fast - def _prepare_list_getset(self, op, descr, args, checkname): + def _prepare_list_getset(self, op, args, checkname, *descrs): non_negative = self._get_list_nonneg_canraise_flags(op) if non_negative: return args[1], [] @@ -1495,8 +1494,8 @@ v_posindex = Variable('posindex') v_posindex.concretetype = lltype.Signed op0 = SpaceOperation('-live-', [], None) - op1 = SpaceOperation(checkname, [args[0], args[1], - descr], v_posindex) + op1 = SpaceOperation(checkname, [args[0], args[1]] + list(descrs), + v_posindex) return v_posindex, [op0, op1] def _prepare_void_list_getset(self, op): @@ -1555,8 +1554,9 @@ SpaceOperation('getarrayitem_vable_%s' % kind[0], [v_base, args[1], arrayfielddescr, arraydescr], op.result)] - v_index, extraop = self._prepare_list_getset(op, arraydescr, args, - 'check_neg_index') + v_index, extraop = self._prepare_list_getset(op, args, + 'check_neg_index', + arraydescr) extra = getkind(op.result.concretetype)[0] if pure: extra += 
'_pure' @@ -1576,8 +1576,9 @@ SpaceOperation('setarrayitem_vable_%s' % kind[0], [v_base, args[1], args[2], arrayfielddescr, arraydescr], None)] - v_index, extraop = self._prepare_list_getset(op, arraydescr, args, - 'check_neg_index') + v_index, extraop = self._prepare_list_getset(op, args, + 'check_neg_index', + arraydescr) kind = getkind(args[2].concretetype)[0] op = SpaceOperation('setarrayitem_gc_%s' % kind, [args[0], v_index, args[2], arraydescr], None) @@ -1594,46 +1595,47 @@ # ---------- resizable lists ---------- - def do_resizable_newlist(self, op, args, arraydescr, lengthdescr, + def do_resizable_newlist(self, op, args, arraydescr, itemsdescr, structdescr): v_length = self._get_initial_newlist_length(op, args) return SpaceOperation('newlist', - [v_length, structdescr, lengthdescr, itemsdescr, + [v_length, structdescr, itemsdescr, arraydescr], + op.result) + + def do_resizable_newlist_hint(self, op, args, arraydescr, + itemsdescr, structdescr): + v_hint = self._get_initial_newlist_length(op, args) + return SpaceOperation('newlist_hint', + [v_hint, structdescr, itemsdescr, arraydescr], op.result) - def do_resizable_newlist_hint(self, op, args, arraydescr, lengthdescr, + def do_resizable_list_getitem(self, op, args, arraydescr, itemsdescr, structdescr): - v_hint = self._get_initial_newlist_length(op, args) - return SpaceOperation('newlist_hint', - [v_hint, structdescr, lengthdescr, itemsdescr, - arraydescr], - op.result) - - def do_resizable_list_getitem(self, op, args, arraydescr, lengthdescr, - itemsdescr, structdescr): - v_index, extraop = self._prepare_list_getset(op, lengthdescr, args, - 'check_resizable_neg_index') + v_index, extraop = self._prepare_list_getset(op, args, + 'check_resizable_neg_index', + itemsdescr, arraydescr) kind = getkind(op.result.concretetype)[0] op = SpaceOperation('getlistitem_gc_%s' % kind, [args[0], v_index, itemsdescr, arraydescr], op.result) return extraop + [op] - def do_resizable_list_setitem(self, op, args, arraydescr, 
lengthdescr, + def do_resizable_list_setitem(self, op, args, arraydescr, itemsdescr, structdescr): - v_index, extraop = self._prepare_list_getset(op, lengthdescr, args, - 'check_resizable_neg_index') + v_index, extraop = self._prepare_list_getset(op, args, + 'check_resizable_neg_index', + itemsdescr, arraydescr) kind = getkind(args[2].concretetype)[0] op = SpaceOperation('setlistitem_gc_%s' % kind, [args[0], v_index, args[2], itemsdescr, arraydescr], None) return extraop + [op] - def do_resizable_list_len(self, op, args, arraydescr, lengthdescr, + def do_resizable_list_len(self, op, args, arraydescr, itemsdescr, structdescr): - return SpaceOperation('getfield_gc_i', - [args[0], lengthdescr], op.result) + return SpaceOperation('getlistlen', + [args[0], itemsdescr, arraydescr], op.result) def do_resizable_void_list_getitem(self, op, args): self._prepare_void_list_getset(op) diff --git a/rpython/jit/codewriter/test/test_list.py b/rpython/jit/codewriter/test/test_list.py --- a/rpython/jit/codewriter/test/test_list.py +++ b/rpython/jit/codewriter/test/test_list.py @@ -11,9 +11,10 @@ # ____________________________________________________________ FIXEDLIST = lltype.Ptr(lltype.GcArray(lltype.Signed)) +OVERLIST = lltype.Ptr(lltype.GcArray(lltype.Signed, + hints={'overallocated': True})) VARLIST = lltype.Ptr(lltype.GcStruct('VARLIST', - ('length', lltype.Signed), - ('items', FIXEDLIST), + ('items', OVERLIST), adtmeths={"ITEM": lltype.Signed})) class FakeCPU: @@ -25,6 +26,7 @@ return '' class fielddescrof(AbstractDescr): def __init__(self, STRUCT, fieldname): + assert hasattr(STRUCT, fieldname) self.STRUCT = STRUCT self.fieldname = fieldname def __repr__(self): @@ -167,8 +169,7 @@ # Resizable lists def test_resizable_newlist(): - alldescrs = (", ," - " , ") + alldescrs = ", , " builtin_test('newlist', [], VARLIST, """newlist $0, """+alldescrs+""" -> %r0""") builtin_test('newlist', [Constant(5, lltype.Signed)], VARLIST, @@ -195,7 +196,7 @@ [varoftype(VARLIST), 
varoftype(lltype.Signed)], lltype.Signed, """ -live- - check_resizable_neg_index %r0, %i0, -> %i1 + check_resizable_neg_index %r0, %i0, , -> %i1 getlistitem_gc_i %r0, %i1, , -> %i2 """) @@ -211,13 +212,13 @@ varoftype(lltype.Signed)], lltype.Void, """ -live- - check_resizable_neg_index %r0, %i0, -> %i1 + check_resizable_neg_index %r0, %i0, , -> %i1 setlistitem_gc_i %r0, %i1, %i2, , """) def test_resizable_len(): builtin_test('list.len', [varoftype(VARLIST)], lltype.Signed, - """getfield_gc_i %r0, -> %i0""") + """getlistlen %r0, , -> %i0""") def test_resizable_unsupportedop(): builtin_test('list.foobar', [varoftype(VARLIST)], lltype.Signed, From noreply at buildbot.pypy.org Mon Nov 4 18:31:32 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 18:31:32 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: add a header line of links to the nightly listing pages Message-ID: <20131104173132.7E1241C00D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r874:600464fa69ba Date: 2013-11-04 19:30 +0200 http://bitbucket.org/pypy/buildbot/changeset/600464fa69ba/ Log: add a header line of links to the nightly listing pages diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -6,6 +6,7 @@ import urllib import sys from twisted.web.static import File, DirectoryLister +from buildbot.status.web.base import path_to_root class PyPyTarball(object): @@ -145,7 +146,7 @@ Listener = PyPyDirectoryLister else: names = self.sortDirectoryNames(File.listEntities(self)) - Listener = DirectoryLister + Listener = PyPyDirectoryLister return Listener(self.path, names, self.contentTypes, @@ -158,7 +159,10 @@ class PyPyDirectoryLister(DirectoryLister): template = """ -%(header)s +%%(header)s + + + - -

%(header)s

+ +
+ Home + - + + Speed + Summary (trunk) + Summary + Nightly builds + + + Waterfall + + + + Builders + + + + + - About +
+

%%(header)s

@@ -205,7 +230,7 @@ -%(tableContent)s +%%(tableContent)s
@@ -224,6 +249,7 @@ def render(self, request): self.status = request.site.buildbot_service.getStatus() + self.template = self.template % {'path_to_root': path_to_root(request)} return DirectoryLister.render(self, request) def _buildTableContent(self, elements): diff --git a/slave/buildbot.tac b/slave/buildbot.tac --- a/slave/buildbot.tac +++ b/slave/buildbot.tac @@ -1,56 +1,44 @@ -# -*- mode: python -*- + +import os + from twisted.application import service -try: - # 8.x - from buildslave.bot import BuildSlave -except ImportError: - #7.x - from buildbot.slave.bot import BuildSlave +from buildslave.bot import BuildSlave -# --------------------------------------------------------------- -# manual editing of the automatically generated buildbot.tac -# -import os.path -thisfile = os.path.join(os.getcwd(), __file__) -basedir = os.path.abspath(os.path.dirname(thisfile)) -# -# --------------------------------------------------------------- +basedir = r'/home/matti/pypy_stuff/buildbot/slave' +rotateLength = 10000000 +maxRotatedFiles = 10 -def find_passwd(slavename): - masterdir = os.path.join(basedir, '..', 'master') - slaveinfo = os.path.join(masterdir, 'slaveinfo.py') - d = {} - try: - execfile(slaveinfo, d) - return d['passwords'][slavename] - except Exception, e: - print 'error when executing ../master/slaveinfo.py: %s' % repr(e) - print 'using default password for the slave' - return 'default_password' - +# if this is a relocatable tac file, get the directory containing the TAC +if basedir == '.': + import os.path + basedir = os.path.abspath(os.path.dirname(__file__)) -buildmaster_host = 'localhost' -port = 10407 -slavename = 'localhost' -passwd = find_passwd(slavename) -keepalive = 600 -usepty = 0 -umask = None -maxdelay = 300 -rotateLength = 1000000 -maxRotatedFiles = None +# note: this line is matched against to check that this is a buildslave +# directory; do not edit it. 
+application = service.Application('buildslave') -application = service.Application('buildslave') try: from twisted.python.logfile import LogFile from twisted.python.log import ILogObserver, FileLogObserver - logfile = LogFile.fromFullPath("twistd.log", rotateLength=rotateLength, + logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength, maxRotatedFiles=maxRotatedFiles) application.setComponent(ILogObserver, FileLogObserver(logfile).emit) except ImportError: # probably not yet twisted 8.2.0 and beyond, can't set log yet pass + +buildmaster_host = 'localhost' +port = 10407 +slavename = 'localhost' +passwd = 'stam' +keepalive = 600 +usepty = 0 +umask = None +maxdelay = 300 +allow_shutdown = None + s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir, - keepalive, usepty, umask=umask, maxdelay=maxdelay) + keepalive, usepty, umask=umask, maxdelay=maxdelay, + allow_shutdown=allow_shutdown) s.setServiceParent(application) From noreply at buildbot.pypy.org Mon Nov 4 18:33:58 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 18:33:58 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: revert buildbot.tac Message-ID: <20131104173358.9DF4E1C00D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r875:ec3983861744 Date: 2013-11-04 19:33 +0200 http://bitbucket.org/pypy/buildbot/changeset/ec3983861744/ Log: revert buildbot.tac diff --git a/slave/buildbot.tac b/slave/buildbot.tac --- a/slave/buildbot.tac +++ b/slave/buildbot.tac @@ -1,44 +1,56 @@ +# -*- mode: python -*- +from twisted.application import service +try: + # 8.x + from buildslave.bot import BuildSlave +except ImportError: + #7.x + from buildbot.slave.bot import BuildSlave -import os +# --------------------------------------------------------------- +# manual editing of the automatically generated buildbot.tac +# +import os.path +thisfile = os.path.join(os.getcwd(), __file__) +basedir = 
os.path.abspath(os.path.dirname(thisfile)) +# +# --------------------------------------------------------------- -from twisted.application import service -from buildslave.bot import BuildSlave +def find_passwd(slavename): + masterdir = os.path.join(basedir, '..', 'master') + slaveinfo = os.path.join(masterdir, 'slaveinfo.py') + d = {} + try: + execfile(slaveinfo, d) + return d['passwords'][slavename] + except Exception, e: + print 'error when executing ../master/slaveinfo.py: %s' % repr(e) + print 'using default password for the slave' + return 'default_password' + -basedir = r'/home/matti/pypy_stuff/buildbot/slave' -rotateLength = 10000000 -maxRotatedFiles = 10 +buildmaster_host = 'localhost' +port = 10407 +slavename = 'localhost' +passwd = find_passwd(slavename) +keepalive = 600 +usepty = 0 +umask = None +maxdelay = 300 +rotateLength = 1000000 +maxRotatedFiles = None -# if this is a relocatable tac file, get the directory containing the TAC -if basedir == '.': - import os.path - basedir = os.path.abspath(os.path.dirname(__file__)) - -# note: this line is matched against to check that this is a buildslave -# directory; do not edit it. 
application = service.Application('buildslave') - try: from twisted.python.logfile import LogFile from twisted.python.log import ILogObserver, FileLogObserver - logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength, + logfile = LogFile.fromFullPath("twistd.log", rotateLength=rotateLength, maxRotatedFiles=maxRotatedFiles) application.setComponent(ILogObserver, FileLogObserver(logfile).emit) except ImportError: # probably not yet twisted 8.2.0 and beyond, can't set log yet pass - -buildmaster_host = 'localhost' -port = 10407 -slavename = 'localhost' -passwd = 'stam' -keepalive = 600 -usepty = 0 -umask = None -maxdelay = 300 -allow_shutdown = None - s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir, - keepalive, usepty, umask=umask, maxdelay=maxdelay, - allow_shutdown=allow_shutdown) + keepalive, usepty, umask=umask, maxdelay=maxdelay) s.setServiceParent(application) From noreply at buildbot.pypy.org Mon Nov 4 19:05:03 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 4 Nov 2013 19:05:03 +0100 (CET) Subject: [pypy-commit] pypy default: This is now teh default Message-ID: <20131104180503.95B361C01CB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67832:8a79ffc73c9d Date: 2013-11-04 10:04 -0800 http://bitbucket.org/pypy/pypy/changeset/8a79ffc73c9d/ Log: This is now teh default diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -205,5 +205,3 @@ u'The PyPy Project', 1) ] -# Enable the new ReadTheDocs theme -RTD_NEW_THEME = True From noreply at buildbot.pypy.org Mon Nov 4 19:44:19 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 19:44:19 +0100 (CET) Subject: [pypy-commit] pypy default: fix min/max of complex with nans Message-ID: <20131104184419.C11441C00D8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67833:1fb8f989bf6a Date: 2013-11-04 13:43 -0500 
http://bitbucket.org/pypy/pypy/changeset/1fb8f989bf6a/ Log: fix min/max of complex with nans diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -1,6 +1,7 @@ from _numpypy.multiarray import * from _numpypy.umath import * +nan = float('nan') newaxis = None ufunc = type(sin) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -319,6 +319,27 @@ assert x == 3 assert isinstance(x, (int, long)) + def test_complex_nan_extrema(self): + import math + import numpy as np + cnan = complex(0, np.nan) + + b = np.minimum(1, cnan) + assert b.real == 0 + assert math.isnan(b.imag) + + b = np.maximum(1, cnan) + assert b.real == 0 + assert math.isnan(b.imag) + + b = np.fmin(1, cnan) + assert b.real == 1 + assert b.imag == 0 + + b = np.fmax(1, cnan) + assert b.real == 1 + assert b.imag == 0 + def test_multiply(self): from numpypy import array, multiply, arange diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1192,11 +1192,11 @@ def _lt(self, v1, v2): (r1, i1), (r2, i2) = v1, v2 - if r1 < r2: + if r1 < r2 and not rfloat.isnan(i1) and not rfloat.isnan(i2): return True - elif not r1 <= r2: - return False - return i1 < i2 + if r1 == r2 and i1 < i2: + return True + return False @raw_binary_op def lt(self, v1, v2): @@ -1234,10 +1234,14 @@ return self._bool(v1) ^ self._bool(v2) def min(self, v1, v2): - return self.fmin(v1, v2) + if self.le(v1, v2) or self.isnan(v1): + return v1 + return v2 def max(self, v1, v2): - return self.fmax(v1, v2) + if self.ge(v1, v2) or self.isnan(v1): + return v1 + return v2 @complex_binary_op def floordiv(self, v1, v2): @@ -1292,20 +1296,12 @@ return -1,0 def 
fmax(self, v1, v2): - if self.isnan(v2): - return v1 - elif self.isnan(v1): - return v2 - if self.ge(v1, v2): + if self.ge(v1, v2) or self.isnan(v2): return v1 return v2 def fmin(self, v1, v2): - if self.isnan(v2): - return v1 - elif self.isnan(v1): - return v2 - if self.le(v1, v2): + if self.le(v1, v2) or self.isnan(v2): return v1 return v2 From noreply at buildbot.pypy.org Mon Nov 4 20:37:55 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 4 Nov 2013 20:37:55 +0100 (CET) Subject: [pypy-commit] pypy default: fix str_format of complex nan/inf Message-ID: <20131104193755.41A461C0651@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67834:c3b67fad2bc1 Date: 2013-11-04 14:37 -0500 http://bitbucket.org/pypy/pypy/changeset/c3b67fad2bc1/ Log: fix str_format of complex nan/inf diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -70,3 +70,9 @@ x = tp(1+2j) assert hasattr(x, '__complex__') == (tp != np.cdouble) assert complex(x) == 1+2j + + def test_complex_str_format(self): + import numpy as np + assert str(np.complex128(complex(1, float('nan')))) == '(1+nan*j)' + assert str(np.complex128(complex(1, float('inf')))) == '(1+inf*j)' + assert str(np.complex128(complex(1, float('-inf')))) == '(1-inf*j)' diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1027,14 +1027,17 @@ def str_format(self, box): real, imag = self.for_computation(self.unbox(box)) - imag_str = str_format(imag) + 'j' + imag_str = str_format(imag) + if rfloat.isnan(imag) or rfloat.isinf(imag): + imag_str += '*' + imag_str += 'j' # (0+2j) => 2j if real == 0: return imag_str real_str = str_format(real) - op = '+' if imag >= 0 else '' + op = '+' if rfloat.copysign(1, imag) > 0 else '' return ''.join(['(', real_str, op, imag_str, 
')']) def fill(self, storage, width, box, start, stop, offset): From noreply at buildbot.pypy.org Mon Nov 4 21:26:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 4 Nov 2013 21:26:53 +0100 (CET) Subject: [pypy-commit] pypy default: New FAQ entry: Module xyz does not work with PyPy: ImportError Message-ID: <20131104202653.05A2B1C01DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67836:e59a35b9ec62 Date: 2013-11-04 21:26 +0100 http://bitbucket.org/pypy/pypy/changeset/e59a35b9ec62/ Log: New FAQ entry: Module xyz does not work with PyPy: ImportError diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -45,6 +45,41 @@ We list the differences we know about in `cpython differences`_. +----------------------------------------------- +Module xyz does not work with PyPy: ImportError +----------------------------------------------- + +A module installed for CPython is not automatically available for PyPy +--- just like a module installed for CPython 2.6 is not automatically +available for CPython 2.7 if you installed both. In other words, you +need to install the module xyz specifically for PyPy. + +On Linux, this means that you cannot use ``apt-get`` or some similar +package manager: these tools are only meant *for the version of CPython +provided by the same package manager.* So forget about them for now +and read on. + +It is quite common nowadays that xyz is available on PyPI_ and +installable with ``pip install xyz``. The simplest solution is to `use +virtualenv (as documented here)`_. Then enter (activate) the virtualenv +and type: ``pip install xyz``. + +If you get errors from the C compiler, the module is a CPython C +Extension module using unsupported features. 
`See below.`_ + +Alternatively, if either the module xyz is not available on PyPI or you +don't want to use virtualenv, then download the source code of xyz, +decompress the zip/tarball, and run the standard command: ``pypy +setup.py install``. (Note: `pypy` here instead of `python`.) As usual +you may need to run the command with `sudo` for a global installation. +The other commands of ``setup.py`` are available too, like ``build``. + +.. _PyPI: https://pypi.python.org/pypi +.. _`use virtualenv (as documented here)`: getting-started.html#installing-using-virtualenv + + +.. _`See below.`: + -------------------------------------------- Do CPython Extension modules work with PyPy? -------------------------------------------- @@ -55,7 +90,9 @@ extension modules in PyPy are often much slower than in CPython due to the need to emulate refcounting. It is often faster to take out your CPython extension and replace it with a pure python version that the -JIT can see. +JIT can see. If trying to install module xyz, and the module has both +a C and a Python version of the same code, try first to disable the C +version; this is usually easily done by changing some line in ``setup.py``. We fully support ctypes-based extensions. But for best performance, we recommend that you use the cffi_ module to interface with C code. From noreply at buildbot.pypy.org Mon Nov 4 21:26:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 4 Nov 2013 21:26:51 +0100 (CET) Subject: [pypy-commit] pypy array-overallocation-in-nursery: Rewrite the 'jit_conditional_call' operation to 'conditional_call' and Message-ID: <20131104202651.933971C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-overallocation-in-nursery Changeset: r67835:d32ed008256d Date: 2013-11-04 20:51 +0100 http://bitbucket.org/pypy/pypy/changeset/d32ed008256d/ Log: Rewrite the 'jit_conditional_call' operation to 'conditional_call' and always allow it to occur, even if not jitted. 
Avoids the mess of '*args' and playing around with 'if we_are_jitted()'. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1350,7 +1350,7 @@ return [] return getattr(self, 'handle_jit_marker__%s' % key)(op, jitdriver) - def rewrite_op_jit_conditional_call(self, op): + def rewrite_op_conditional_call(self, op): have_floats = False for arg in op.args: if getkind(arg.concretetype) == 'float': diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -999,20 +999,13 @@ return hop.genop('jit_record_known_class', [v_inst, v_cls], resulttype=lltype.Void) -def _jit_conditional_call(condition, function, *args): - pass - - at specialize.call_location() def conditional_call(condition, function, *args): - if we_are_jitted(): - _jit_conditional_call(condition, function, *args) - else: - if condition: - function(*args) -conditional_call._always_inline_ = True + "NOT_RPYTHON" + if condition: + function(*args) class ConditionalCallEntry(ExtRegistryEntry): - _about_ = _jit_conditional_call + _about_ = conditional_call def compute_result_annotation(self, *args_s): self.bookkeeper.emulate_pbc_call(self.bookkeeper.position_key, @@ -1025,7 +1018,7 @@ args_v[1] = hop.args_r[1].get_concrete_llfn(hop.args_s[1], hop.args_s[2:], hop.spaceop) hop.exception_is_here() - return hop.genop('jit_conditional_call', args_v) + return hop.genop('conditional_call', args_v) class Counters(object): counters=""" diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -529,8 +529,11 @@ def op_jit_record_known_class(self, *args): pass - def op_jit_conditional_call(self, *args): - raise NotImplementedError("should not be called while not jitted") + def op_conditional_call(self, condition, function, *args): + assert isinstance(condition, bool) + if 
condition: + res = self.op_direct_call(function, *args) + assert res is None def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -450,7 +450,7 @@ 'jit_force_quasi_immutable': LLOp(canrun=True), 'jit_record_known_class' : LLOp(canrun=True), 'jit_ffi_save_result': LLOp(canrun=True), - 'jit_conditional_call': LLOp(), + 'conditional_call': LLOp(), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -431,14 +431,16 @@ break return line - def OP_DIRECT_CALL(self, op): - fn = op.args[0] + def _op_direct_call(self, fn, args_v, result): try: targets = [fn.value._obj.graph] except AttributeError: targets = None return self.generic_call(fn.concretetype, self.expr(fn), - op.args[1:], op.result, targets) + args_v, result, targets) + + def OP_DIRECT_CALL(self, op): + return self._op_direct_call(op.args[0], op.args[1:], op.result) def OP_INDIRECT_CALL(self, op): fn = op.args[0] @@ -454,8 +456,12 @@ fnexpr = '((%s)%s)' % (cdecl(typename, ''), self.expr(fnaddr)) return self.generic_call(FUNC, fnexpr, op.args[1:], op.result) - def OP_JIT_CONDITIONAL_CALL(self, op): - return 'abort(); /* jit_conditional_call */' + def OP_CONDITIONAL_CALL(self, op): + condition = self.expr(op.args[0]) + assert op.result.concretetype is Void + call = self._op_direct_call(op.args[1], op.args[2:], op.result) + return 'if (%s) { /* conditional_call */\n\t%s\n}' % ( + condition, call.replace('\n', '\n\t')) # low-level operations def generic_get(self, op, sourceexpr): From noreply at buildbot.pypy.org Mon Nov 4 22:12:02 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 
Nov 2013 22:12:02 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: merge default into branch Message-ID: <20131104211202.A77B81C01F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67837:152c025075fa Date: 2013-10-22 21:32 +0300 http://bitbucket.org/pypy/pypy/changeset/152c025075fa/ Log: merge default into branch diff too long, truncating to 2000 out of 4446 lines diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py --- a/lib_pypy/numpypy/core/arrayprint.py +++ b/lib_pypy/numpypy/core/arrayprint.py @@ -247,10 +247,11 @@ formatdict = {'bool' : _boolFormatter, 'int' : IntegerFormat(data), 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : LongFloatFormat(precision), + 'longfloat' : FloatFormat(data, precision, suppress_small), 'complexfloat' : ComplexFormat(data, precision, suppress_small), - 'longcomplexfloat' : LongComplexFormat(precision), + 'longcomplexfloat' : ComplexFormat(data, precision, + suppress_small), 'datetime' : DatetimeFormat(data), 'timedelta' : TimedeltaFormat(data), 'numpystr' : repr_format, diff --git a/lib_pypy/numpypy/core/numerictypes.py b/lib_pypy/numpypy/core/numerictypes.py --- a/lib_pypy/numpypy/core/numerictypes.py +++ b/lib_pypy/numpypy/core/numerictypes.py @@ -395,6 +395,9 @@ ('int_', 'long'), ('uint', 'ulong'), ('cfloat', 'cdouble'), + ('longfloat', 'longdouble'), + ('clongfloat', 'clongdouble'), + ('longcomplex', 'clongdouble'), ('bool_', 'bool'), ('unicode_', 'unicode'), ] diff --git a/lib_pypy/numpypy/lib/__init__.py b/lib_pypy/numpypy/lib/__init__.py --- a/lib_pypy/numpypy/lib/__init__.py +++ b/lib_pypy/numpypy/lib/__init__.py @@ -5,10 +5,12 @@ from .function_base import * from .shape_base import * from .twodim_base import * +from .ufunclike import * from .utils import * __all__ = ['math'] __all__ += function_base.__all__ __all__ += shape_base.__all__ __all__ += twodim_base.__all__ +__all__ += ufunclike.__all__ __all__ += utils.__all__ diff 
--git a/lib_pypy/numpypy/lib/ufunclike.py b/lib_pypy/numpypy/lib/ufunclike.py new file mode 100644 --- /dev/null +++ b/lib_pypy/numpypy/lib/ufunclike.py @@ -0,0 +1,177 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['fix', 'isneginf', 'isposinf'] + +from ..core import numeric as nx + +def fix(x, y=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values are returned as floats. + + Parameters + ---------- + x : array_like + An array of floats to be rounded + y : ndarray, optional + Output array + + Returns + ------- + out : ndarray of floats + The array of rounded numbers + + See Also + -------- + trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3.0 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + x = nx.asanyarray(x) + y1 = nx.floor(x) + y2 = nx.ceil(x) + if y is None: + y = nx.asanyarray(y1) + y[...] = nx.where(x >= 0, y1, y2) + return y + +def isposinf(x, y=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + y : array_like, optional + A boolean array with the same shape as `x` to store the result. + + Returns + ------- + y : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. 
If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `y` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when `x` is a + scalar input, or if first and second arguments have different shapes. + + Examples + -------- + >>> np.isposinf(np.PINF) + array(True, dtype=bool) + >>> np.isposinf(np.inf) + array(True, dtype=bool) + >>> np.isposinf(np.NINF) + array(False, dtype=bool) + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True], dtype=bool) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + if y is None: + x = nx.asarray(x) + y = nx.empty(x.shape, dtype=nx.bool_) + nx.logical_and(nx.isinf(x), ~nx.signbit(x), y) + return y + +def isneginf(x, y=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + y : array_like, optional + A boolean array with the same shape and type as `x` to store the + result. + + Returns + ------- + y : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `y` is then a reference to that array. 
+ + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, or if first and second arguments have different shapes. + + Examples + -------- + >>> np.isneginf(np.NINF) + array(True, dtype=bool) + >>> np.isneginf(np.inf) + array(False, dtype=bool) + >>> np.isneginf(np.PINF) + array(False, dtype=bool) + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False], dtype=bool) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + if y is None: + x = nx.asarray(x) + y = nx.empty(x.shape, dtype=nx.bool_) + nx.logical_and(nx.isinf(x), nx.signbit(x), y) + return y diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -109,4 +109,5 @@ .. branch: file-support-in-rpython make open() and friends rpython - +.. branch: incremental-gc +Added the new incminimark GC which performs GC in incremental steps diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -239,6 +239,18 @@ # _____ this code is here to support testing only _____ return self + def unpackiterable_int(self, space): + lst = space.listview_int(self) + if lst: + return lst[:] + return None + + def unpackiterable_float(self, space): + lst = space.listview_float(self) + if lst: + return lst[:] + return None + class W_InterpIterable(W_Root): def __init__(self, space, w_iterable): @@ -838,6 +850,22 @@ return self._unpackiterable_known_length_jitlook(w_iterator, expected_length) + + def unpackiterable_int(self, w_obj): + """ + Return a RPython list of unwrapped ints out of w_obj. 
The list is + guaranteed to be acopy of the actual data contained in w_obj, so you + can freely modify it. It might return None if not supported. + """ + return w_obj.unpackiterable_int(self) + + def unpackiterable_float(self, w_obj): + """ + Same as unpackiterable_int, but for floats. + """ + return w_obj.unpackiterable_float(self) + + def length_hint(self, w_obj, default): """Return the length of an object, consulting its __length_hint__ method if necessary. @@ -895,6 +923,20 @@ """ return None + def listview_int(self, w_list): + """ Return a list of unwrapped int out of a list of int. If the + argument is not a list or does not contain only int, return None. + May return None anyway. + """ + return None + + def listview_float(self, w_list): + """ Return a list of unwrapped float out of a list of float. If the + argument is not a list or does not contain only float, return None. + May return None anyway. + """ + return None + def view_as_kwargs(self, w_dict): """ if w_dict is a kwargs-dict, return two lists, one of unwrapped strings and one of wrapped values. 
otherwise return (None, None) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -282,6 +282,12 @@ def iter(self): return self.ctype.iter(self) + def unpackiterable_int(self, space): + return self.ctype.aslist_int(self) + + def unpackiterable_float(self, space): + return self.ctype.aslist_float(self) + @specialize.argtype(1) def write_raw_signed_data(self, source): misc.write_raw_signed_data(self._cdata, source, self.ctype.size) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -105,6 +105,26 @@ def iter(self, cdata): return W_CDataIter(self.space, self.ctitem, cdata) + def aslist_int(self, cdata): + from rpython.rlib.rarray import populate_list_from_raw_array + if self.ctitem.is_long(): + res = [] + buf = rffi.cast(rffi.LONGP, cdata._cdata) + length = cdata.get_array_length() + populate_list_from_raw_array(res, buf, length) + return res + return None + + def aslist_float(self, cdata): + from rpython.rlib.rarray import populate_list_from_raw_array + if self.ctitem.is_double(): + res = [] + buf = rffi.cast(rffi.DOUBLEP, cdata._cdata) + length = cdata.get_array_length() + populate_list_from_raw_array(res, buf, length) + return res + return None + def get_vararg_type(self): return self.ctptr diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -43,6 +43,12 @@ def is_unichar_ptr_or_array(self): return False + def is_long(self): + return False + + def is_double(self): + return False + def newp(self, w_init): space = self.space raise operationerrfmt(space.w_TypeError, @@ -163,6 +169,9 @@ "cdata '%s' does not support iteration", self.name) + def unpackiterable_int(self, cdata): + 
return None + def get_vararg_type(self): return self diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -85,7 +85,6 @@ return self.space.wrap(s) return W_CType.string(self, cdataobj, maxlen) - class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -171,6 +170,9 @@ self.vmin = r_uint(-1) << (sh - 1) self.vrangemax = (r_uint(1) << sh) - 1 + def is_long(self): + return self.size == rffi.sizeof(lltype.Signed) + def cast_to_int(self, cdata): return self.convert_to_object(cdata) @@ -274,6 +276,9 @@ class W_CTypePrimitiveFloat(W_CTypePrimitive): _attrs_ = [] + def is_double(self): + return self.size == rffi.sizeof(lltype.Float) + def cast(self, w_ob): space = self.space if isinstance(w_ob, cdataobj.W_CData): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -42,6 +42,12 @@ def is_char_or_unichar_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveCharOrUniChar) + def aslist_int(self, cdata): + return None + + def aslist_float(self, cdata): + return None + def cast(self, w_ob): # cast to a pointer, to a funcptr, or to an array. 
# Note that casting to an array is an extension to the C language, @@ -58,19 +64,45 @@ value = rffi.cast(rffi.CCHARP, value) return cdataobj.W_CData(space, value, self) + def _convert_array_from_list_strategy_maybe(self, cdata, w_ob): + from rpython.rlib.rarray import copy_list_to_raw_array + int_list = self.space.listview_int(w_ob) + float_list = self.space.listview_float(w_ob) + # + if self.ctitem.is_long() and int_list is not None: + cdata = rffi.cast(rffi.LONGP, cdata) + copy_list_to_raw_array(int_list, cdata) + return True + # + if self.ctitem.is_double() and float_list is not None: + cdata = rffi.cast(rffi.DOUBLEP, cdata) + copy_list_to_raw_array(float_list, cdata) + return True + # + return False + + def _convert_array_from_listview(self, cdata, w_ob): + space = self.space + lst_w = space.listview(w_ob) + if self.length >= 0 and len(lst_w) > self.length: + raise operationerrfmt(space.w_IndexError, + "too many initializers for '%s' (got %d)", + self.name, len(lst_w)) + ctitem = self.ctitem + for i in range(len(lst_w)): + ctitem.convert_from_object(cdata, lst_w[i]) + cdata = rffi.ptradd(cdata, ctitem.size) + def convert_array_from_object(self, cdata, w_ob): space = self.space + if self._convert_array_from_list_strategy_maybe(cdata, w_ob): + # the fast path worked, we are done now + return + # + # continue with the slow path if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): - lst_w = space.listview(w_ob) - if self.length >= 0 and len(lst_w) > self.length: - raise operationerrfmt(space.w_IndexError, - "too many initializers for '%s' (got %d)", - self.name, len(lst_w)) - ctitem = self.ctitem - for i in range(len(lst_w)): - ctitem.convert_from_object(cdata, lst_w[i]) - cdata = rffi.ptradd(cdata, ctitem.size) + self._convert_array_from_listview(cdata, w_ob) elif (self.can_cast_anything or (self.ctitem.is_primitive_integer and self.ctitem.size == rffi.sizeof(lltype.Char))): diff --git 
a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -0,0 +1,100 @@ +# side-effect: FORMAT_LONGDOUBLE must be built before test_checkmodule() +from pypy.module._cffi_backend import misc +from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + +class AppTest_fast_path_from_list(object): + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + + def setup_method(self, meth): + def forbidden(self, *args): + assert False, 'The slow path is forbidden' + self._original = W_CTypePtrOrArray._convert_array_from_listview.im_func + W_CTypePtrOrArray._convert_array_from_listview = forbidden + + def teardown_method(self, meth): + W_CTypePtrOrArray._convert_array_from_listview = self._original + + def test_fast_init_from_list(self): + import _cffi_backend + LONG = _cffi_backend.new_primitive_type('long') + P_LONG = _cffi_backend.new_pointer_type(LONG) + LONG_ARRAY = _cffi_backend.new_array_type(P_LONG, None) + buf = _cffi_backend.newp(LONG_ARRAY, [1, 2, 3]) + assert buf[0] == 1 + assert buf[1] == 2 + assert buf[2] == 3 + + def test_fast_init_from_list_float(self): + import _cffi_backend + DOUBLE = _cffi_backend.new_primitive_type('double') + P_DOUBLE = _cffi_backend.new_pointer_type(DOUBLE) + DOUBLE_ARRAY = _cffi_backend.new_array_type(P_DOUBLE, None) + buf = _cffi_backend.newp(DOUBLE_ARRAY, [1.1, 2.2, 3.3]) + assert buf[0] == 1.1 + assert buf[1] == 2.2 + assert buf[2] == 3.3 + + +class AppTest_fast_path_to_list(object): + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + + def setup_method(self, meth): + from pypy.interpreter import gateway + from rpython.rlib import rarray + # + self.count = 0 + def get_count(*args): + return self.space.wrap(self.count) + self.w_get_count = self.space.wrap(gateway.interp2app(get_count)) + # + original = rarray.populate_list_from_raw_array + def 
populate_list_from_raw_array(*args): + self.count += 1 + return original(*args) + self._original = original + rarray.populate_list_from_raw_array = populate_list_from_raw_array + # + self.w_runappdirect = self.space.wrap(self.runappdirect) + + + def teardown_method(self, meth): + from rpython.rlib import rarray + rarray.populate_list_from_raw_array = self._original + + def test_list_int(self): + import _cffi_backend + LONG = _cffi_backend.new_primitive_type('long') + P_LONG = _cffi_backend.new_pointer_type(LONG) + LONG_ARRAY = _cffi_backend.new_array_type(P_LONG, 3) + buf = _cffi_backend.newp(LONG_ARRAY) + buf[0] = 1 + buf[1] = 2 + buf[2] = 3 + lst = list(buf) + assert lst == [1, 2, 3] + if not self.runappdirect: + assert self.get_count() == 1 + + def test_TypeError_if_no_length(self): + import _cffi_backend + LONG = _cffi_backend.new_primitive_type('long') + P_LONG = _cffi_backend.new_pointer_type(LONG) + LONG_ARRAY = _cffi_backend.new_array_type(P_LONG, 3) + buf = _cffi_backend.newp(LONG_ARRAY) + pbuf = _cffi_backend.cast(P_LONG, buf) + raises(TypeError, "list(pbuf)") + + + def test_list_float(self): + import _cffi_backend + DOUBLE = _cffi_backend.new_primitive_type('double') + P_DOUBLE = _cffi_backend.new_pointer_type(DOUBLE) + DOUBLE_ARRAY = _cffi_backend.new_array_type(P_DOUBLE, 3) + buf = _cffi_backend.newp(DOUBLE_ARRAY) + buf[0] = 1.1 + buf[1] = 2.2 + buf[2] = 3.3 + lst = list(buf) + assert lst == [1.1, 2.2, 3.3] + if not self.runappdirect: + assert self.get_count() == 1 diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -1,5 +1,4 @@ from pypy.interpreter.mixedmodule import MixedModule -from pypy.module.micronumpy.interp_boxes import long_double_size, ENABLED_LONG_DOUBLE class MultiArrayModule(MixedModule): @@ -64,6 +63,7 @@ ("less_equal", "less_equal"), ("maximum", "maximum"), ("minimum", "minimum"), + ("mod", "mod"), ("multiply", 
"multiply"), ("negative", "negative"), ("not_equal", "not_equal"), @@ -91,8 +91,6 @@ ('invert', 'invert'), ('isnan', 'isnan'), ('isinf', 'isinf'), - ('isneginf', 'isneginf'), - ('isposinf', 'isposinf'), ('isfinite', 'isfinite'), ('logical_and', 'logical_and'), ('logical_xor', 'logical_xor'), diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -102,13 +102,10 @@ dtype = self.dtype.float_type return SliceArray(self.start + dtype.get_size(), strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) - if self.dtype.is_flexible_type(): - # numpy returns self for self.imag - return SliceArray(self.start, strides, backstrides, - self.get_shape(), self, orig_array) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) - impl.fill(self.dtype.box(0)) + if not self.dtype.is_flexible_type(): + impl.fill(self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): @@ -129,7 +126,8 @@ idx = self.get_shape()[i] + idx if idx < 0 or idx >= self.get_shape()[i]: raise operationerrfmt(space.w_IndexError, - "index (%d) out of range (0<=index<%d", i, self.get_shape()[i], + "index %d is out of bounds for axis %d with size %d", + idx, i, self.get_shape()[i], ) item += idx * strides[i] return item @@ -145,7 +143,8 @@ idx = shape[i] + idx if idx < 0 or idx >= shape[i]: raise operationerrfmt(space.w_IndexError, - "index (%d) out of range (0<=index<%d", i, shape[i], + "index %d is out of bounds for axis %d with size %d", + idx, i, self.get_shape()[i], ) item += idx * strides[i] return item @@ -380,8 +379,8 @@ class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): - raise OperationError(space.w_RuntimeError, space.wrap( - "array is not writable")) + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is 
read-only")) class SliceArray(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -8,7 +8,7 @@ from rpython.rlib.rawstorage import raw_storage_getitem, raw_storage_setitem, \ free_raw_storage, alloc_raw_storage from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import widen from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError from pypy.module.micronumpy.base import W_NDimArray @@ -43,7 +43,7 @@ + self.start + step * i) v.append(_v) if comp_type == 'int': - v = intmask(v) + v = widen(v) elif comp_type == 'float': v = float(v) elif comp_type == 'complex': @@ -100,10 +100,15 @@ if count < 2: def arg_lt(a, b): # Does numpy do <= ? - return a[0] < b[0] + return a[0] < b[0] or b[0] != b[0] and a[0] == a[0] else: def arg_lt(a, b): for i in range(count): + if b[0][i] != b[0][i] and a[0][i] == a[0][i]: + return True + elif b[0][i] == b[0][i] and a[0][i] != a[0][i]: + return False + for i in range(count): if a[0][i] < b[0][i]: return True elif a[0][i] > b[0][i]: @@ -200,7 +205,7 @@ + self.start + step * i) v.append(_v) if comp_type == 'int': - v = intmask(v) + v = widen(v) elif comp_type == 'float': v = float(v) elif comp_type == 'complex': @@ -318,7 +323,8 @@ all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) -all_types = [i for i in all_types if not '_mixin_' in i[0].__dict__] +all_types = [i for i in all_types if not '_mixin_' in i[0].__dict__ and + not issubclass(i[0], types.BaseFloat16)] all_types = unrolling_iterable(all_types) class ArgSortCache(object): diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -1,4 +1,21 @@ +from 
pypy.interpreter.error import OperationError -MODE_WRAP, MODE_RAISE, MODE_CLIP = range(3) +MODE_CLIP, MODE_WRAP, MODE_RAISE = range(3) -MODES = {'wrap': MODE_WRAP, 'raise': MODE_RAISE, 'clip': MODE_CLIP} +def clipmode_converter(space, w_mode): + if space.is_none(w_mode): + return MODE_RAISE + if space.isinstance_w(w_mode, space.w_str): + mode = space.str_w(w_mode) + if mode.startswith('C') or mode.startswith('c'): + return MODE_CLIP + if mode.startswith('W') or mode.startswith('w'): + return MODE_WRAP + if mode.startswith('R') or mode.startswith('r'): + return MODE_RAISE + elif space.isinstance_w(w_mode, space.w_int): + mode = space.int_w(w_mode) + if MODE_CLIP <= mode <= MODE_RAISE: + return mode + raise OperationError(space.w_TypeError, + space.wrap("clipmode not understood")) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -1,10 +1,9 @@ - from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import loop, interp_dtype, interp_ufuncs +from pypy.module.micronumpy import loop, interp_dtype, interp_ufuncs, constants from pypy.module.micronumpy.iter import Chunk, Chunks from pypy.module.micronumpy.strides import shape_agreement,\ shape_agreement_multiple -from pypy.module.micronumpy.constants import MODES +from pypy.module.micronumpy.constants import clipmode_converter from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec @@ -118,12 +117,12 @@ shape[i] += axis_size a_dt = arr.get_dtype() if dtype.is_record_type() and a_dt.is_record_type(): - #Record types must match + # Record types must match for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: raise OperationError(space.w_TypeError, - space.wrap("record type mismatch")) + space.wrap("invalid type promotion")) elif 
dtype.is_record_type() or a_dt.is_record_type(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) @@ -171,8 +170,7 @@ def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) - at unwrap_spec(mode=str) -def choose(space, w_arr, w_choices, w_out, mode): +def choose(space, w_arr, w_choices, w_out, w_mode): arr = convert_to_array(space, w_arr) choices = [convert_to_array(space, w_item) for w_item in space.listview(w_choices)] @@ -187,23 +185,16 @@ shape = shape_agreement_multiple(space, choices + [w_out]) out = interp_dtype.dtype_agreement(space, choices, shape, w_out) dtype = out.get_dtype() - if mode not in MODES: - raise OperationError(space.w_ValueError, - space.wrap("mode %s not known" % (mode,))) - loop.choose(space, arr, choices, shape, dtype, out, MODES[mode]) + mode = clipmode_converter(space, w_mode) + loop.choose(space, arr, choices, shape, dtype, out, mode) return out - - at unwrap_spec(mode=str) -def put(space, w_arr, w_indices, w_values, mode='raise'): - from pypy.module.micronumpy import constants +def put(space, w_arr, w_indices, w_values, w_mode): from pypy.module.micronumpy.support import int_w arr = convert_to_array(space, w_arr) + mode = clipmode_converter(space, w_mode) - if mode not in constants.MODES: - raise OperationError(space.w_ValueError, - space.wrap("mode %s not known" % (mode,))) if not w_indices: raise OperationError(space.w_ValueError, space.wrap("indice list cannot be empty")) @@ -228,13 +219,13 @@ index = int_w(space, idx) if index < 0 or index >= arr.get_size(): - if constants.MODES[mode] == constants.MODE_RAISE: - raise OperationError(space.w_ValueError, space.wrap( - "invalid entry in choice array")) - elif constants.MODES[mode] == constants.MODE_WRAP: + if mode == constants.MODE_RAISE: + raise OperationError(space.w_IndexError, space.wrap( + "index %d is out of bounds for axis 0 with size %d" % (index, arr.get_size()))) + elif mode == constants.MODE_WRAP: 
index = index % arr.get_size() else: - assert constants.MODES[mode] == constants.MODE_CLIP + assert mode == constants.MODE_CLIP if index < 0: index = 0 else: @@ -247,7 +238,6 @@ arr.setitem(space, [index], dtype.coerce(space, value)) - def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -20,14 +20,14 @@ MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () -# Is this the proper place for this? -ENABLED_LONG_DOUBLE = False -long_double_size = rffi.sizeof_c_type('long double', ignore_errors=True) +#long_double_size = rffi.sizeof_c_type('long double', ignore_errors=True) +#import os +#if long_double_size == 8 and os.name == 'nt': +# # this is a lie, or maybe a wish, MS fakes longdouble math with double +# long_double_size = 12 -import os -if long_double_size == 8 and os.name == 'nt': - # this is a lie, or maybe a wish, MS fakes longdouble math with double - long_double_size = 12 +# hardcode to 8 for now (simulate using normal double) until long double works +long_double_size = 8 def new_dtype_getter(name): @@ -63,6 +63,7 @@ class PrimitiveBox(Box): _mixin_ = True + _immutable_fields_ = ['value'] def __init__(self, value): self.value = value @@ -82,11 +83,11 @@ ret = builder.build() lltype.free(value, flavor="raw") - return ret class ComplexBox(Box): _mixin_ = True + _immutable_fields_ = ['real', 'imag'] def __init__(self, real, imag=0.): self.real = real @@ -111,11 +112,11 @@ ret = builder.build() lltype.free(value, flavor="raw") - return ret + class W_GenericBox(W_Root): - _attrs_ = () + _attrs_ = [] def descr__new__(space, w_subtype, __args__): raise operationerrfmt(space.w_TypeError, @@ -125,12 +126,21 @@ def get_dtype(self, space): return self._get_dtype(space) + def item(self, space): + 
return self.get_dtype(space).itemtype.to_builtin_type(space, self) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) def descr_format(self, space, w_spec): return space.format(self.item(space), w_spec) + def descr_hash(self, space): + return space.hash(self.item(space)) + + def descr_index(self, space): + return space.index(self.item(space)) + def descr_int(self, space): box = self.convert_to(W_LongBox._get_dtype(space)) assert isinstance(box, W_LongBox) @@ -222,19 +232,13 @@ w_remainder = self.descr_rmod(space, w_other) return space.newtuple([w_quotient, w_remainder]) - def descr_hash(self, space): - return space.hash(self.item(space)) - - def item(self, space): - return self.get_dtype(space).itemtype.to_builtin_type(space, self) - def descr_any(self, space): value = space.is_true(self) - return space.wrap(W_BoolBox(value)) + return self.get_dtype(space).box(value) def descr_all(self, space): value = space.is_true(self) - return space.wrap(W_BoolBox(value)) + return self.get_dtype(space).box(value) def descr_ravel(self, space): from pypy.module.micronumpy.base import convert_to_array @@ -260,7 +264,7 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") class W_NumberBox(W_GenericBox): - _attrs_ = () + pass class W_IntegerBox(W_NumberBox): def int_w(self, space): @@ -309,10 +313,10 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter('ulonglong') class W_InexactBox(W_NumberBox): - _attrs_ = () + pass class W_FloatingBox(W_InexactBox): - _attrs_ = () + pass class W_Float16Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float16") @@ -323,9 +327,43 @@ class W_Float64Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") +class W_ComplexFloatingBox(W_InexactBox): + def descr_get_real(self, space): + dtype = self._COMPONENTS_BOX._get_dtype(space) + box = self.convert_real_to(dtype) + assert isinstance(box, 
self._COMPONENTS_BOX) + return space.wrap(box) + + def descr_get_imag(self, space): + dtype = self._COMPONENTS_BOX._get_dtype(space) + box = self.convert_imag_to(dtype) + assert isinstance(box, self._COMPONENTS_BOX) + return space.wrap(box) + +class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") + _COMPONENTS_BOX = W_Float32Box + +class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") + _COMPONENTS_BOX = W_Float64Box + +if long_double_size == 8: + W_FloatLongBox = W_Float64Box + W_ComplexLongBox = W_Complex128Box + +elif long_double_size in (12, 16): + class W_FloatLongBox(W_FloatingBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float%d" % (long_double_size * 8)) + + class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex%d" % (long_double_size * 16)) + _COMPONENTS_BOX = W_FloatLongBox + class W_FlexibleBox(W_GenericBox): - _attrs_ = ['ofs', 'dtype', 'arr'] - _immutable_fields_ = ['ofs'] + _attrs_ = ['arr', 'ofs', 'dtype'] + _immutable_fields_ = ['arr', 'ofs', 'dtype'] + def __init__(self, arr, ofs, dtype): self.arr = arr # we have to keep array alive self.ofs = ofs @@ -334,11 +372,6 @@ def get_dtype(self, space): return self.arr.dtype - at unwrap_spec(self=W_GenericBox) -def descr_index(space, self): - return space.index(self.item(space)) - - class W_VoidBox(W_FlexibleBox): def descr_getitem(self, space, w_item): from pypy.module.micronumpy.types import VoidType @@ -388,7 +421,6 @@ # XXX assert dtype is str type return self - class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype @@ -398,7 +430,6 @@ arr.storage[i] = arg[i] return W_StringBox(arr, 0, arr.dtype) - class W_UnicodeBox(W_CharacterBox): def 
descr__new__unicode_box(space, w_subtype, w_arg): raise OperationError(space.w_NotImplementedError, space.wrap("Unicode is not supported yet")) @@ -413,59 +444,6 @@ # arr.storage[i] = arg[i] return W_UnicodeBox(arr, 0, arr.dtype) - -class W_ComplexFloatingBox(W_InexactBox): - _attrs_ = () - def descr_get_real(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_real_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) - - def descr_get_imag(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_imag_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) - - -class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") - _COMPONENTS_BOX = W_Float32Box - - -class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") - _COMPONENTS_BOX = W_Float64Box - -if ENABLED_LONG_DOUBLE and long_double_size == 12: - class W_Float96Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float96") - - W_LongDoubleBox = W_Float96Box - - class W_Complex192Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex192") - _COMPONENTS_BOX = W_Float96Box - - W_CLongDoubleBox = W_Complex192Box - -elif ENABLED_LONG_DOUBLE and long_double_size == 16: - class W_Float128Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float128") - W_LongDoubleBox = W_Float128Box - - class W_Complex256Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex256") - _COMPONENTS_BOX = W_Float128Box - - W_CLongDoubleBox = W_Complex256Box - -elif ENABLED_LONG_DOUBLE: - W_LongDoubleBox = W_Float64Box - W_CLongDoubleBox = W_Complex64Box - - W_GenericBox.typedef = TypeDef("generic", 
__module__ = "numpypy", @@ -535,7 +513,7 @@ W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_BoolBox.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_BoolBox.descr_index), __reduce__ = interp2app(W_BoolBox.descr_reduce), ) @@ -558,49 +536,49 @@ W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int8Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_Int8Box.descr_index), __reduce__ = interp2app(W_Int8Box.descr_reduce), ) W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt8Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_UInt8Box.descr_index), __reduce__ = interp2app(W_UInt8Box.descr_reduce), ) W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_Int16Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_Int16Box.descr_index), __reduce__ = interp2app(W_Int16Box.descr_reduce), ) W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt16Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_UInt16Box.descr_index), __reduce__ = interp2app(W_UInt16Box.descr_reduce), ) W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __module__ = "numpypy", __new__ = interp2app(W_Int32Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_Int32Box.descr_index), __reduce__ = interp2app(W_Int32Box.descr_reduce), ) W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt32Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = 
interp2app(W_UInt32Box.descr_index), __reduce__ = interp2app(W_UInt32Box.descr_reduce), ) W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __module__ = "numpypy", __new__ = interp2app(W_Int64Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_Int64Box.descr_index), __reduce__ = interp2app(W_Int64Box.descr_reduce), ) @@ -614,7 +592,7 @@ W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpypy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), - __index__ = interp2app(descr_index), + __index__ = interp2app(W_UInt64Box.descr_index), __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) @@ -628,53 +606,53 @@ W_Float16Box.typedef = TypeDef("float16", W_FloatingBox.typedef, __module__ = "numpypy", - __new__ = interp2app(W_Float16Box.descr__new__.im_func), __reduce__ = interp2app(W_Float16Box.descr_reduce), ) W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, __module__ = "numpypy", - __new__ = interp2app(W_Float32Box.descr__new__.im_func), __reduce__ = interp2app(W_Float32Box.descr_reduce), ) W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), __module__ = "numpypy", - __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), ) -if ENABLED_LONG_DOUBLE and long_double_size == 12: - W_Float96Box.typedef = TypeDef("float96", (W_FloatingBox.typedef), +W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, + __module__ = "numpypy", +) + +W_Complex64Box.typedef = TypeDef("complex64", (W_ComplexFloatingBox.typedef), + __module__ = "numpypy", + __new__ = interp2app(W_Complex64Box.descr__new__.im_func), + __reduce__ = interp2app(W_Complex64Box.descr_reduce), + real = GetSetProperty(W_ComplexFloatingBox .descr_get_real), + imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), +) + +W_Complex128Box.typedef = TypeDef("complex128", 
(W_ComplexFloatingBox.typedef, complex_typedef), + __module__ = "numpypy", + __new__ = interp2app(W_Complex128Box.descr__new__.im_func), + __reduce__ = interp2app(W_Complex128Box.descr_reduce), + real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), + imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), +) + +if long_double_size in (12, 16): + W_FloatLongBox.typedef = TypeDef("float%d" % (long_double_size * 8), (W_FloatingBox.typedef), __module__ = "numpypy", - __reduce__ = interp2app(W_Float96Box.descr_reduce), - - __new__ = interp2app(W_Float96Box.descr__new__.im_func), + __new__ = interp2app(W_FloatLongBox.descr__new__.im_func), + __reduce__ = interp2app(W_FloatLongBox.descr_reduce), ) - W_Complex192Box.typedef = TypeDef("complex192", (W_ComplexFloatingBox.typedef, complex_typedef), + W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), __module__ = "numpypy", - __new__ = interp2app(W_Complex192Box.descr__new__.im_func), - __reduce__ = interp2app(W_Complex192Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), - ) - -elif ENABLED_LONG_DOUBLE and long_double_size == 16: - W_Float128Box.typedef = TypeDef("float128", (W_FloatingBox.typedef), - __module__ = "numpypy", - - __new__ = interp2app(W_Float128Box.descr__new__.im_func), - __reduce__ = interp2app(W_Float128Box.descr_reduce), - ) - - W_Complex256Box.typedef = TypeDef("complex256", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpypy", - __new__ = interp2app(W_Complex256Box.descr__new__.im_func), - __reduce__ = interp2app(W_Complex256Box.descr_reduce), + __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), + __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) @@ -703,24 +681,3 @@ 
__module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) - -W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, - __module__ = "numpypy", -) - - -W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpypy", - __new__ = interp2app(W_Complex128Box.descr__new__.im_func), - __reduce__ = interp2app(W_Complex128Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), -) - -W_Complex64Box.typedef = TypeDef("complex64", (W_ComplexFloatingBox.typedef), - __module__ = "numpypy", - __new__ = interp2app(W_Complex64Box.descr__new__.im_func), - __reduce__ = interp2app(W_Complex64Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox .descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), -) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,4 +1,3 @@ - import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt @@ -11,6 +10,12 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit +if sys.byteorder == 'little': + byteorder_prefix = '<' + nonnative_byteorder_prefix = '>' +else: + byteorder_prefix = '>' + nonnative_byteorder_prefix = '<' UNSIGNEDLTR = "u" SIGNEDLTR = "i" @@ -44,12 +49,11 @@ out = base.W_NDimArray.from_shape(space, shape, dtype) return out - class W_Dtype(W_Root): _immutable_fields_ = ["itemtype", "num", "kind", "shape"] def __init__(self, itemtype, num, kind, name, char, w_box_type, - alternate_constructors=[], aliases=[], + alternate_constructors=[], aliases=[], float_type=None, fields=None, fieldnames=None, native=True, shape=[], subdtype=None): self.itemtype = itemtype self.num = num @@ -59,10 
+63,10 @@ self.w_box_type = w_box_type self.alternate_constructors = alternate_constructors self.aliases = aliases + self.float_type = float_type self.fields = fields self.fieldnames = fieldnames self.native = native - self.float_type = None self.shape = list(shape) self.subdtype = subdtype if not subdtype: @@ -148,7 +152,11 @@ def eq(self, space, w_other): w_other = space.call_function(space.gettypefor(W_Dtype), w_other) - return space.is_w(self, w_other) + if space.is_w(self, w_other): + return True + if isinstance(w_other, W_Dtype): + return space.eq_w(self.descr_reduce(space), w_other.descr_reduce(space)) + return False def descr_eq(self, space, w_other): return space.wrap(self.eq(space, w_other)) @@ -223,7 +231,7 @@ return self.kind == SIGNEDLTR def is_complex_type(self): - return False + return self.kind == COMPLEXLTR def is_float_type(self): return (self.kind == FLOATINGLTR or self.float_type is not None) @@ -259,21 +267,22 @@ builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) version = space.wrap(3) - order = space.wrap(byteorder_prefix if self.native else nonnative_byteorder_prefix) names = self.descr_get_names(space) values = self.descr_get_fields(space) if self.fields: + order = space.wrap('|') #TODO: Implement this when subarrays are implemented subdescr = space.w_None - #TODO: Change this when alignment is implemented : size = 0 for key in self.fields: dtype = self.fields[key][1] assert isinstance(dtype, W_Dtype) size += dtype.get_size() w_size = space.wrap(size) + #TODO: Change this when alignment is implemented alignment = space.wrap(1) else: + order = space.wrap(byteorder_prefix if self.native else nonnative_byteorder_prefix) subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) @@ -295,18 +304,6 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) -class W_ComplexDtype(W_Dtype): - def __init__(self, itemtype, num, kind, name, char, w_box_type, - 
alternate_constructors=[], aliases=[], - fields=None, fieldnames=None, native=True, float_type=None): - W_Dtype.__init__(self, itemtype, num, kind, name, char, w_box_type, - alternate_constructors=alternate_constructors, aliases=aliases, - fields=fields, fieldnames=fieldnames, native=native) - self.float_type = float_type - - def is_complex_type(self): - return True - def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) fields = {} @@ -340,38 +337,6 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "dtype from dict")) -def variable_dtype(space, name): - if name[0] in '<>=': - name = name[1:] - char = name[0] - if len(name) == 1: - size = 0 - else: - try: - size = int(name[1:]) - except ValueError: - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) - if char == 'S' or char == 'c': - itemtype = types.StringType(size) - basename = 'string' - num = 18 - w_box_type = space.gettypefor(interp_boxes.W_StringBox) - elif char == 'V': - num = 20 - basename = 'void' - itemtype = types.VoidType(size) - return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(size), - "V", space.gettypefor(interp_boxes.W_VoidBox)) - else: - assert char == 'U' - basename = 'unicode' - itemtype = types.UnicodeType(size) - num = 19 - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) - return W_Dtype(itemtype, num, char, - basename + str(8 * itemtype.get_element_size()), - char, w_box_type) - def dtype_from_spec(space, name): raise OperationError(space.w_NotImplementedError, space.wrap( "dtype from spec")) @@ -455,30 +420,61 @@ ) W_Dtype.typedef.acceptable_as_base_class = False -if sys.byteorder == 'little': - byteorder_prefix = '<' - nonnative_byteorder_prefix = '>' -else: - byteorder_prefix = '>' - nonnative_byteorder_prefix = '<' + +def variable_dtype(space, name): + if name[0] in '<>=': + name = name[1:] + char = name[0] + if len(name) == 1: + size = 0 + else: + try: + size = int(name[1:]) + except ValueError: + raise 
OperationError(space.w_TypeError, space.wrap("data type not understood")) + if char == 'c': + char = 'S' + size = 1 + if char == 'S': + itemtype = types.StringType(size) + basename = 'string' + num = 18 + w_box_type = space.gettypefor(interp_boxes.W_StringBox) + elif char == 'V': + num = 20 + basename = 'void' + itemtype = types.VoidType(size) + return W_Dtype(itemtype, 20, VOIDLTR, "void" + str(size), + "V", space.gettypefor(interp_boxes.W_VoidBox)) + else: + assert char == 'U' + basename = 'unicode' + itemtype = types.UnicodeType(size) + num = 19 + w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) + return W_Dtype(itemtype, num, char, + basename + str(8 * itemtype.get_element_size()), + char, w_box_type) def new_string_dtype(space, size): + itemtype = types.StringType(size) return W_Dtype( - types.StringType(size), + itemtype, num=18, kind=STRINGLTR, - name='string', - char='S' + str(size), + name='string' + str(8 * itemtype.get_element_size()), + char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): + itemtype = types.UnicodeType(size) return W_Dtype( - types.UnicodeType(size), + itemtype, num=19, kind=UNICODELTR, - name='unicode', - char='U' + str(size), + name='unicode' + str(8 * itemtype.get_element_size()), + char='U', w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -542,15 +538,11 @@ char="I", w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), ) - if LONG_BIT == 32: - name = "int32" - elif LONG_BIT == 64: - name = "int64" self.w_longdtype = W_Dtype( types.Long(), num=7, kind=SIGNEDLTR, - name=name, + name="int%d" % LONG_BIT, char="l", w_box_type=space.gettypefor(interp_boxes.W_LongBox), alternate_constructors=[space.w_int, @@ -563,7 +555,7 @@ types.ULong(), num=8, kind=UNSIGNEDLTR, - name="u" + name, + name="uint%d" % LONG_BIT, char="L", w_box_type=space.gettypefor(interp_boxes.W_ULongBox), alternate_constructors=[ space.gettypefor(interp_boxes.W_UnsignedIntegerBox), @@ -607,7 
+599,16 @@ ], aliases=["float", "double"], ) - self.w_complex64dtype = W_ComplexDtype( + self.w_floatlongdtype = W_Dtype( + types.FloatLong(), + num=13, + kind=FLOATINGLTR, + name="float%d" % (interp_boxes.long_double_size * 8), + char="g", + w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), + aliases=["longdouble", "longfloat"], + ) + self.w_complex64dtype = W_Dtype( types.Complex64(), num=14, kind=COMPLEXLTR, @@ -616,7 +617,7 @@ w_box_type = space.gettypefor(interp_boxes.W_Complex64Box), float_type = self.w_float32dtype, ) - self.w_complex128dtype = W_ComplexDtype( + self.w_complex128dtype = W_Dtype( types.Complex128(), num=15, kind=COMPLEXLTR, @@ -627,57 +628,16 @@ aliases=["complex"], float_type = self.w_float64dtype, ) - if interp_boxes.ENABLED_LONG_DOUBLE and interp_boxes.long_double_size == 12: - self.w_float96dtype = W_Dtype( - types.Float96(), - num=13, - kind=FLOATINGLTR, - name="float96", - char="g", - w_box_type=space.gettypefor(interp_boxes.W_Float96Box), - aliases=["longdouble", "longfloat"], - ) - self.w_complex192dtype = W_ComplexDtype( - types.Complex192(), - num=16, - kind=COMPLEXLTR, - name="complex192", - char="G", - w_box_type = space.gettypefor(interp_boxes.W_Complex192Box), - alternate_constructors=[space.w_complex], - aliases=["clongdouble", "clongfloat"], - float_type = self.w_float96dtype, - ) - self.w_longdouble = self.w_float96dtype - self.w_clongdouble = self.w_complex192dtype - elif interp_boxes.ENABLED_LONG_DOUBLE and interp_boxes.long_double_size == 16: - self.w_float128dtype = W_Dtype( - types.Float128(), - num=13, - kind=FLOATINGLTR, - name="float128", - char="g", - w_box_type=space.gettypefor(interp_boxes.W_Float128Box), - aliases=["longdouble", "longfloat"], - ) - self.w_complex256dtype = W_ComplexDtype( - types.Complex256(), - num=16, - kind=COMPLEXLTR, - name="complex256", - char="G", - w_box_type = space.gettypefor(interp_boxes.W_Complex256Box), - alternate_constructors=[space.w_complex], - aliases=["clongdouble", 
"clongfloat"], - float_type = self.w_float128dtype, - ) - self.w_longdouble = self.w_float128dtype - self.w_clongdouble = self.w_complex256dtype - elif interp_boxes.ENABLED_LONG_DOUBLE: - self.w_float64dtype.aliases += ["longdouble", "longfloat"] - self.w_complex128dtype.aliases += ["clongdouble", "clongfloat"] - self.w_longdouble = self.w_float64dtype - self.w_clongdouble = self.w_complex128dtype + self.w_complexlongdtype = W_Dtype( + types.ComplexLong(), + num=16, + kind=COMPLEXLTR, + name="complex%d" % (interp_boxes.long_double_size * 16), + char="G", + w_box_type = space.gettypefor(interp_boxes.W_ComplexLongBox), + aliases=["clongdouble", "clongfloat"], + float_type = self.w_floatlongdtype, + ) self.w_stringdtype = W_Dtype( types.StringType(0), num=18, @@ -750,21 +710,18 @@ char=UINTPLTR, w_box_type = space.gettypefor(uintp_box), ) - float_dtypes = [self.w_float16dtype, - self.w_float32dtype, self.w_float64dtype, - ] - complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype] - if interp_boxes.ENABLED_LONG_DOUBLE: - float_dtypes.append(self.w_longdouble) - complex_dtypes.append(self.w_clongdouble) + float_dtypes = [self.w_float16dtype, self.w_float32dtype, + self.w_float64dtype, self.w_floatlongdtype] + complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype, + self.w_complexlongdtype] self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, self.w_int16dtype, self.w_uint16dtype, self.w_longdtype, self.w_ulongdtype, self.w_int32dtype, self.w_uint32dtype, - self.w_int64dtype, self.w_uint64dtype] + \ - float_dtypes + complex_dtypes + [ + self.w_int64dtype, self.w_uint64dtype, + ] + float_dtypes + complex_dtypes + [ self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, self.w_intpdtype, self.w_uintpdtype, ] @@ -818,6 +775,7 @@ 'STRING': self.w_stringdtype, 'CFLOAT': self.w_complex64dtype, 'CDOUBLE': self.w_complex128dtype, + 'CLONGDOUBLE': self.w_complexlongdtype, #'DATETIME', 'UINT': self.w_uint32dtype, 'INTP': 
self.w_intpdtype, @@ -827,13 +785,11 @@ #'TIMEDELTA', 'INT': self.w_int32dtype, 'DOUBLE': self.w_float64dtype, + 'LONGDOUBLE': self.w_floatlongdtype, 'USHORT': self.w_uint16dtype, 'FLOAT': self.w_float32dtype, 'BOOL': self.w_booldtype, } - if interp_boxes.ENABLED_LONG_DOUBLE: - typeinfo_full['LONGDOUBLE'] = self.w_longdouble - typeinfo_full['CLONGDOUBLE'] = self.w_clongdouble typeinfo_partial = { 'Generic': interp_boxes.W_GenericBox, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,4 +1,3 @@ - from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -14,7 +13,7 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop from pypy.module.micronumpy.dot import match_dot_shapes -from pypy.module.micronumpy.interp_arrayops import repeat, choose +from pypy.module.micronumpy.interp_arrayops import repeat, choose, put from pypy.module.micronumpy.arrayimpl import scalar from rpython.tool.sourcetools import func_with_new_name from rpython.rlib import jit @@ -421,8 +420,8 @@ [0] * len(self.get_shape())) assert isinstance(w_obj, interp_boxes.W_GenericBox) return w_obj.item(space) - raise OperationError(space.w_IndexError, - space.wrap("index out of bounds")) + raise OperationError(space.w_ValueError, + space.wrap("can only convert an array of size 1 to a Python scalar")) if space.isinstance_w(w_arg, space.w_int): if self.is_scalar(): raise OperationError(space.w_IndexError, @@ -509,9 +508,8 @@ loop.byteswap(self.implementation, w_res.implementation) return w_res - @unwrap_spec(mode=str) - def descr_choose(self, space, w_choices, w_out=None, mode='raise'): - return choose(space, self, w_choices, w_out, mode) + 
def descr_choose(self, space, w_choices, w_out=None, w_mode=None): + return choose(space, self, w_choices, w_out, w_mode) def descr_clip(self, space, w_min, w_max, w_out=None): if space.is_none(w_out): @@ -550,6 +548,12 @@ return interp_arrayops.diagonal(space, self.implementation, offset, axis1, axis2) + @unwrap_spec(offset=int, axis1=int, axis2=int) + def descr_trace(self, space, offset=0, axis1=0, axis2=1, + w_dtype=None, w_out=None): + diag = self.descr_diagonal(space, offset, axis1, axis2) + return diag.descr_sum(space, w_axis=space.wrap(-1), w_dtype=w_dtype, w_out=w_out) + def descr_dump(self, space, w_file): raise OperationError(space.w_NotImplementedError, space.wrap( "dump not implemented yet")) @@ -584,10 +588,8 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ptp (peak to peak) not implemented yet")) - @unwrap_spec(mode=str) - def descr_put(self, space, w_indices, w_values, mode='raise'): - from pypy.module.micronumpy.interp_arrayops import put - put(space, self, w_indices, w_values, mode) + def descr_put(self, space, w_indices, w_values, w_mode=None): + put(space, self, w_indices, w_values, w_mode) def descr_resize(self, space, w_new_shape, w_refcheck=True): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -653,11 +655,6 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "tofile not implemented yet")) - def descr_trace(self, space, w_offset=0, w_axis1=0, w_axis2=1, - w_dtype=None, w_out=None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "trace not implemented yet")) - def descr_view(self, space, w_dtype=None, w_type=None) : if not w_type and w_dtype: try: @@ -845,7 +842,7 @@ def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False, cumultative=False): - def impl(self, space, w_axis=None, w_out=None, w_dtype=None): + def impl(self, space, w_axis=None, w_dtype=None, w_out=None): if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -1153,6 +1150,7 @@ round = 
interp2app(W_NDimArray.descr_round), data = GetSetProperty(W_NDimArray.descr_get_data), diagonal = interp2app(W_NDimArray.descr_diagonal), + trace = interp2app(W_NDimArray.descr_trace), view = interp2app(W_NDimArray.descr_view), ctypes = GetSetProperty(W_NDimArray.descr_get_ctypes), # XXX unimplemented diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -16,23 +16,22 @@ def done_if_false(dtype, val): return not dtype.itemtype.bool(val) + class W_Ufunc(W_Root): - _attrs_ = ["name", "promote_to_float", "promote_bools", "identity", - "allow_complex", "complex_to_float"] - _immutable_fields_ = ["promote_to_float", "promote_bools", "name", - "allow_complex", "complex_to_float"] + _immutable_fields_ = ["name", "promote_to_float", "promote_bools", "identity", + "int_only", "allow_bool", "allow_complex", "complex_to_float"] def __init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_complex, complex_to_float): + int_only, allow_bool, allow_complex, complex_to_float): self.name = name self.promote_to_float = promote_to_float self.promote_bools = promote_bools + self.identity = identity + self.int_only = int_only + self.allow_bool = allow_bool self.allow_complex = allow_complex self.complex_to_float = complex_to_float - self.identity = identity - self.int_only = int_only - def descr_repr(self, space): return space.wrap("" % self.name) @@ -253,16 +252,14 @@ return res class W_Ufunc1(W_Ufunc): + _immutable_fields_ = ["func", "bool_result"] argcount = 1 - _immutable_fields_ = ["func", "name"] - def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None, bool_result=False, int_only=False, - allow_complex=True, complex_to_float=False): - + identity=None, bool_result=False, int_only=False, + allow_bool=True, allow_complex=True, complex_to_float=False): W_Ufunc.__init__(self, name, 
promote_to_float, promote_bools, identity, - int_only, allow_complex, complex_to_float) + int_only, allow_bool, allow_complex, complex_to_float) self.func = func self.bool_result = bool_result @@ -274,17 +271,19 @@ if space.is_w(out, space.w_None): out = None w_obj = convert_to_array(space, w_obj) - if w_obj.get_dtype().is_flexible_type(): + dtype = w_obj.get_dtype() + if dtype.is_flexible_type(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if self.int_only and not w_obj.get_dtype().is_int_type(): + if (self.int_only and not dtype.is_int_type() or + not self.allow_bool and dtype.is_bool_type() or + not self.allow_complex and dtype.is_complex_type()): raise OperationError(space.w_TypeError, space.wrap( "ufunc %s not supported for the input type" % self.name)) calc_dtype = find_unaryop_result_dtype(space, w_obj.get_dtype(), promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - allow_complex=self.allow_complex) + promote_bools=self.promote_bools) if out is not None: if not isinstance(out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( @@ -319,15 +318,14 @@ class W_Ufunc2(W_Ufunc): - _immutable_fields_ = ["comparison_func", "func", "name", "int_only"] + _immutable_fields_ = ["func", "comparison_func", "done_func"] argcount = 2 def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None, comparison_func=False, int_only=False, - allow_complex=True, complex_to_float=False): - + identity=None, comparison_func=False, int_only=False, + allow_bool=True, allow_complex=True, complex_to_float=False): W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_complex, complex_to_float) + int_only, allow_bool, allow_complex, complex_to_float) self.func = func self.comparison_func = comparison_func if name == 'logical_and': @@ -375,16 +373,14 @@ w_rdtype = w_ldtype elif w_lhs.is_scalar() and not w_rhs.is_scalar(): w_ldtype = w_rdtype + 
if (self.int_only and (not w_ldtype.is_int_type() or not w_rdtype.is_int_type()) or + not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or + not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): + raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype, - int_only=self.int_only, promote_to_float=self.promote_to_float, - promote_bools=self.promote_bools, - allow_complex=self.allow_complex, - ) - if self.int_only and not calc_dtype.is_int_type(): - raise OperationError(space.w_TypeError, space.wrap( - "ufunc '%s' not supported for the input types" % self.name)) + promote_bools=self.promote_bools) if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -431,14 +427,10 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, - promote_bools=False, int_only=False, allow_complex=True): + promote_bools=False): # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 - if int_only and (not dt1.is_int_type() or not dt2.is_int_type()): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) - if not allow_complex and (dt1.is_complex_type() or dt2.is_complex_type()): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype @@ -449,8 +441,8 @@ return interp_dtype.get_dtype_cache(space).w_complex64dtype elif dt2.num == 15: return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif interp_boxes.ENABLED_LONG_DOUBLE and dt2.num == 16: - return interp_dtype.get_dtype_cache(space).w_clongdouble + elif dt2.num == 16: + return interp_dtype.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported 
types")) @@ -507,14 +499,11 @@ dtypenum += 2 return interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] - @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, - promote_bools=False, promote_to_largest=False, allow_complex=True): + promote_bools=False, promote_to_largest=False): if promote_bools and (dt.kind == interp_dtype.BOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype - if not allow_complex and (dt.is_complex_type()): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) if promote_to_float: if dt.kind == interp_dtype.FLOATINGLTR or dt.kind==interp_dtype.COMPLEXLTR: return dt @@ -535,7 +524,6 @@ assert False return dt - def find_dtype_for_scalar(space, w_obj, current_guess=None): bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype @@ -588,7 +576,6 @@ 'unable to create dtype from objects, ' '"%T" instance not supported', w_obj) - def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, bool_result): dtype_cache = interp_dtype.get_dtype_cache(space) @@ -606,6 +593,7 @@ return res return func_with_new_name(impl, ufunc_name) + class UfuncState(object): def __init__(self, space): "NOT_RPYTHON" @@ -635,10 +623,6 @@ ("greater_equal", "ge", 2, {"comparison_func": True}), ("isnan", "isnan", 1, {"bool_result": True}), ("isinf", "isinf", 1, {"bool_result": True}), - ("isneginf", "isneginf", 1, {"bool_result": True, - "allow_complex": False}), - ("isposinf", "isposinf", 1, {"bool_result": True, - "allow_complex": False}), ("isfinite", "isfinite", 1, {"bool_result": True}), ('logical_and', 'logical_and', 2, {'comparison_func': True, @@ -658,7 +642,7 @@ ("negative", "neg", 1), ("absolute", "abs", 1, {"complex_to_float": True}), ("rint", "rint", 1), - ("sign", "sign", 1, {"promote_bools": True}), + ("sign", "sign", 1, {"allow_bool": False}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), 
("reciprocal", "reciprocal", 1), @@ -713,6 +697,7 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), + ("ones_like", "ones_like", 1), ("zeros_like", "zeros_like", 1), ]: diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -153,5 +153,12 @@ a = arange(5) a.put(22, -5, mode='wrap') assert (a == array([0, 1, -5, 3, 4])).all() - raises(ValueError, "arange(5).put(22, -5, mode='raise')") - raises(ValueError, "arange(5).put(22, -5, mode='wrongmode')") + raises(IndexError, "arange(5).put(22, -5, mode='raise')") + raises(IndexError, "arange(5).put(22, -5, mode=2)") # raise From noreply at buildbot.pypy.org Mon Nov 4 22:12:04 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 22:12:04 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: merge default into branch Message-ID: <20131104211204.36DC31C01F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67838:cc3307a7b330 Date: 2013-10-22 21:48 +0300 http://bitbucket.org/pypy/pypy/changeset/cc3307a7b330/ Log: merge default into branch diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -28,15 +28,16 @@ config.translation.backendopt.inline_threshold = 0.1 config.translation.gc = 'boehm' config.translating = True +config.translation.rweakref = False set_opt_level(config, level='jit') config.objspace.allworkingmodules = False config.objspace.usemodules.pypyjit = True config.objspace.usemodules.array = False -config.objspace.usemodules._weakref = True +config.objspace.usemodules._weakref = False config.objspace.usemodules._sre = False config.objspace.usemodules._lsprof = False # -config.objspace.usemodules._ffi = True +config.objspace.usemodules._ffi = False config.objspace.usemodules.micronumpy = False # 
set_pypy_opt_level(config, level='jit') @@ -101,7 +102,7 @@ from rpython.jit.codewriter.codewriter import CodeWriter CodeWriter.debug = True - from rpython.jit.tl.pypyjit_child import run_child + from pypy.tool.pypyjit_child import run_child run_child(globals(), locals()) diff --git a/pypy/tool/pypyjit_demo.py b/pypy/tool/pypyjit_demo.py --- a/pypy/tool/pypyjit_demo.py +++ b/pypy/tool/pypyjit_demo.py @@ -1,27 +1,20 @@ -import pypyjit -pypyjit.set_param(threshold=200) -kwargs = {"z": 1} +def g(i): + k = 0 + while k < 3: + k += 1 + return i + 1 -def f(*args, **kwargs): - result = g(1, *args, **kwargs) - return result + 2 +def f(x): + for i in range(10000): + t = (1, 2, i) + i = g(i) + x == t -def g(x, y, z=2): - return x - y + z - -def main(): - res = 0 - i = 0 - while i < 10000: - res = f(res, z=i) - g(1, res, **kwargs) - i += 1 - return res try: - print main() + f((1, 2, 3)) except Exception, e: print "Exception: ", type(e) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -6,7 +6,7 @@ def __init__(self): self.reset() - def reset(self): + def reset(self, reset_virtuals=True): # contains boxes where the class is already known self.known_class_boxes = {} # store the boxes that contain newly allocated objects, this maps the @@ -14,7 +14,8 @@ # escaped the trace or not (True means the box never escaped, False # means it did escape), its presences in the mapping shows that it was # allocated inside the trace - self.new_boxes = {} + if reset_virtuals: + self.new_boxes = {} # Tracks which boxes should be marked as escaped when the key box # escapes. 
self.dependencies = {} diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2058,7 +2058,7 @@ duplicates[box] = None def reached_loop_header(self, greenboxes, redboxes, resumedescr): - self.heapcache.reset() + self.heapcache.reset(reset_virtuals=False) duplicates = {} self.remove_consts_and_duplicates(redboxes, len(redboxes), diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3359,6 +3359,26 @@ assert res == main(1) self.check_resops(call=0, getfield_gc=0) + def test_isvirtual_call_assembler(self): + driver = JitDriver(greens = ['code'], reds = ['n']) + + @look_inside_iff(lambda t1, t2: isvirtual(t1)) + def g(t1, t2): + return t1[0] == t2[0] + + def f(code, n): + while n > 0: + driver.can_enter_jit(code=code, n=n) + driver.jit_merge_point(code=code, n=n) + t = (1, 2, n) + if code: + f(0, 3) + g(t, (1, 2, n)) + n -= 1 + + self.meta_interp(f, [1, 10], inline=True) + self.check_resops(call=0, call_may_force=0, call_assembler=2) + def test_reuse_elidable_result(self): driver = JitDriver(reds=['n', 's'], greens = []) def main(n): From noreply at buildbot.pypy.org Mon Nov 4 22:12:05 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Nov 2013 22:12:05 +0100 (CET) Subject: [pypy-commit] pypy default: add out to np.dot and ndarray.dot Message-ID: <20131104211205.A80441C01F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r67839:4b5d0c9d1e79 Date: 2013-11-04 23:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4b5d0c9d1e79/ Log: add out to np.dot and ndarray.dot diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -91,11 +91,11 @@ out = 
W_NDimArray.from_shape(space, shape, dtype) return loop.where(out, shape, arr, x, y, dtype) -def dot(space, w_obj1, w_obj2): +def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) if w_arr.is_scalar(): - return convert_to_array(space, w_obj2).descr_dot(space, w_arr) - return w_arr.descr_dot(space, w_obj2) + return convert_to_array(space, w_obj2).descr_dot(space, w_arr, w_out) + return w_arr.descr_dot(space, w_obj2, w_out) @unwrap_spec(axis=int) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -853,7 +853,14 @@ w_remainder = self.descr_rmod(space, w_other) return space.newtuple([w_quotient, w_remainder]) - def descr_dot(self, space, w_other): + def descr_dot(self, space, w_other, w_out=None): + if space.is_none(w_out): + out = None + elif not isinstance(w_out, W_NDimArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) + else: + out = w_out other = convert_to_array(space, w_other) if other.is_scalar(): #Note: w_out is not modified, this is numpy compliant. @@ -861,7 +868,7 @@ elif len(self.get_shape()) < 2 and len(other.get_shape()) < 2: w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) - return w_res.descr_sum(space, space.wrap(-1)) + return w_res.descr_sum(space, space.wrap(-1), out) dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: @@ -869,7 +876,25 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? 
out_shape, other_critical_dim = _match_dot_shapes(space, self, other) - w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) + if out: + matches = True + if len(out.get_shape()) != len(out_shape): + matches = False + else: + for i in range(len(out_shape)): + if out.get_shape()[i] != out_shape[i]: + matches = False + break + if dtype != out.get_dtype(): + matches = False + if not out.implementation.order == "C": + matches = False + if not matches: + raise OperationError(space.w_ValueError, space.wrap( + 'output array is not acceptable (must have the right type, nr dimensions, and be a C-Array)')) + w_res = out + else: + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -84,6 +84,17 @@ c = array(3.0).dot(array(4)) assert c == 12.0 + def test_dot_out(self): + from numpypy import arange, dot + a = arange(12).reshape(3, 4) + b = arange(12).reshape(4, 3) + out = arange(9).reshape(3, 3) + c = dot(a, b, out=out) + assert (c == out).all() + out = arange(9,dtype=float).reshape(3, 3) + exc = raises(ValueError, dot, a, b, out) + assert exc.value[0].find('not acceptable') > 0 + def test_choose_basic(self): from numpypy import array a, b, c = array([1, 2, 3]), array([4, 5, 6]), array([7, 8, 9]) From noreply at buildbot.pypy.org Mon Nov 4 23:32:46 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 4 Nov 2013 23:32:46 +0100 (CET) Subject: [pypy-commit] pypy default: utilize rfloat.isfinite Message-ID: <20131104223246.352D61C00D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r67840:934879cb2719 Date: 2013-11-04 14:31 -0800 http://bitbucket.org/pypy/pypy/changeset/934879cb2719/ Log: 
utilize rfloat.isfinite diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -26,9 +26,6 @@ log2 = math.log(2) log2e = 1. / log2 -def isfinite(d): - return not rfloat.isinf(d) and not rfloat.isnan(d) - def simple_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) @@ -860,7 +857,7 @@ @raw_unary_op def isfinite(self, v): - return not (rfloat.isinf(v) or rfloat.isnan(v)) + return rfloat.isfinite(v) @simple_unary_op def radians(self, v): @@ -939,7 +936,7 @@ @simple_unary_op def rint(self, v): x = float(v) - if isfinite(x): + if rfloat.isfinite(x): import math y = math.floor(x) r = x - y @@ -1028,7 +1025,7 @@ def str_format(self, box): real, imag = self.for_computation(self.unbox(box)) imag_str = str_format(imag) - if rfloat.isnan(imag) or rfloat.isinf(imag): + if not rfloat.isfinite(imag): imag_str += '*' imag_str += 'j' @@ -1264,7 +1261,7 @@ def pow(self, v1, v2): if v1[1] == 0 and v2[1] == 0 and v1[0] > 0: return math.pow(v1[0], v2[0]), 0 - #if not isfinite(v1[0]) or not isfinite(v1[1]): + #if not rfloat.isfinite(v1[0]) or not rfloat.isfinite(v1[1]): # return rfloat.NAN, rfloat.NAN try: return rcomplex.c_pow(v1, v2) @@ -1331,9 +1328,9 @@ @specialize.argtype(1) def round(self, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) - if isfinite(ans[0]): + if rfloat.isfinite(ans[0]): ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) - if isfinite(ans[1]): + if rfloat.isfinite(ans[1]): ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) @@ -1363,7 +1360,7 @@ if v[0] < 0: return 0., 0. return rfloat.INFINITY, rfloat.NAN - elif (isfinite(v[0]) or \ + elif (rfloat.isfinite(v[0]) or \ (rfloat.isinf(v[0]) and v[0] > 0)): return rfloat.NAN, rfloat.NAN try: @@ -1391,7 +1388,7 @@ if v[0] < 0: return -1., 0. 
return rfloat.NAN, rfloat.NAN - elif (isfinite(v[0]) or \ + elif (rfloat.isfinite(v[0]) or \ (rfloat.isinf(v[0]) and v[0] > 0)): return rfloat.NAN, rfloat.NAN try: @@ -1408,7 +1405,7 @@ if rfloat.isinf(v[0]): if v[1] == 0.: return rfloat.NAN, 0. - if isfinite(v[1]): + if rfloat.isfinite(v[1]): return rfloat.NAN, rfloat.NAN elif not rfloat.isnan(v[1]): return rfloat.NAN, rfloat.INFINITY @@ -1419,7 +1416,7 @@ if rfloat.isinf(v[0]): if v[1] == 0.: return rfloat.NAN, 0.0 - if isfinite(v[1]): + if rfloat.isfinite(v[1]): return rfloat.NAN, rfloat.NAN elif not rfloat.isnan(v[1]): return rfloat.INFINITY, rfloat.NAN @@ -1427,7 +1424,7 @@ @complex_unary_op def tan(self, v): - if rfloat.isinf(v[0]) and isfinite(v[1]): + if rfloat.isinf(v[0]) and rfloat.isfinite(v[1]): return rfloat.NAN, rfloat.NAN return rcomplex.c_tan(*v) @@ -1453,7 +1450,7 @@ @complex_unary_op def sinh(self, v): if rfloat.isinf(v[1]): - if isfinite(v[0]): + if rfloat.isfinite(v[0]): if v[0] == 0.0: return 0.0, rfloat.NAN return rfloat.NAN, rfloat.NAN @@ -1464,7 +1461,7 @@ @complex_unary_op def cosh(self, v): if rfloat.isinf(v[1]): - if isfinite(v[0]): + if rfloat.isfinite(v[0]): if v[0] == 0.0: return rfloat.NAN, 0.0 return rfloat.NAN, rfloat.NAN @@ -1474,7 +1471,7 @@ @complex_unary_op def tanh(self, v): - if rfloat.isinf(v[1]) and isfinite(v[0]): + if rfloat.isinf(v[1]) and rfloat.isfinite(v[0]): return rfloat.NAN, rfloat.NAN return rcomplex.c_tanh(*v) @@ -1503,7 +1500,7 @@ @raw_unary_op def isfinite(self, v): - return isfinite(v[0]) and isfinite(v[1]) + return rfloat.isfinite(v[0]) and rfloat.isfinite(v[1]) #@simple_unary_op #def radians(self, v): From noreply at buildbot.pypy.org Tue Nov 5 02:48:40 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 5 Nov 2013 02:48:40 +0100 (CET) Subject: [pypy-commit] pypy default: sign of nan can be ambiguous, use isnan Message-ID: <20131105014840.EFC471C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67841:b782ce0d380d Date: 
2013-11-04 20:46 -0500 http://bitbucket.org/pypy/pypy/changeset/b782ce0d380d/ Log: sign of nan can be ambiguous, use isnan diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -74,5 +74,6 @@ def test_complex_str_format(self): import numpy as np assert str(np.complex128(complex(1, float('nan')))) == '(1+nan*j)' + assert str(np.complex128(complex(1, float('-nan')))) == '(1+nan*j)' assert str(np.complex128(complex(1, float('inf')))) == '(1+inf*j)' assert str(np.complex128(complex(1, float('-inf')))) == '(1-inf*j)' diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1034,7 +1034,7 @@ return imag_str real_str = str_format(real) - op = '+' if rfloat.copysign(1, imag) > 0 else '' + op = '+' if imag >= 0 or rfloat.isnan(imag) else '' return ''.join(['(', real_str, op, imag_str, ')']) def fill(self, storage, width, box, start, stop, offset): From noreply at buildbot.pypy.org Tue Nov 5 04:11:56 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 5 Nov 2013 04:11:56 +0100 (CET) Subject: [pypy-commit] pypy default: provide astype/nbytes for scalars Message-ID: <20131105031156.8C0E11C02C7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67842:adc554f83b04 Date: 2013-11-04 22:03 -0500 http://bitbucket.org/pypy/pypy/changeset/adc554f83b04/ Log: provide astype/nbytes for scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -249,6 +249,12 @@ v = self.convert_to(self.get_dtype(space)) return self.get_dtype(space).itemtype.round(v, decimals) + def descr_astype(self, space, w_dtype): + from pypy.module.micronumpy.interp_dtype import W_Dtype + dtype = 
space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + return self.convert_to(dtype) + def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, @@ -516,10 +522,12 @@ ravel = interp2app(W_GenericBox.descr_ravel), round = interp2app(W_GenericBox.descr_round), conjugate = interp2app(W_GenericBox.descr_conjugate), + astype = interp2app(W_GenericBox.descr_astype), view = interp2app(W_GenericBox.descr_view), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), + nbytes = GetSetProperty(W_GenericBox.descr_get_itemsize), shape = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), ) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -56,11 +56,21 @@ assert b.round() == 1.0 assert b.round(decimals=5) is b + def test_astype(self): + import numpy as np + a = np.bool_(True).astype(np.float32) + assert type(a) is np.float32 + assert a == 1.0 + a = np.bool_(True).astype('int32') + assert type(a) is np.int32 + assert a == 1 + def test_attributes(self): import numpypy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.itemsize == 8 + assert value.nbytes == 8 assert value.shape == () assert value.ndim == 0 From noreply at buildbot.pypy.org Tue Nov 5 04:26:25 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 5 Nov 2013 04:26:25 +0100 (CET) Subject: [pypy-commit] pypy default: provide scalar.squeeze() Message-ID: <20131105032625.7AAB81C0651@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67843:55261a401261 Date: 2013-11-04 22:25 -0500 http://bitbucket.org/pypy/pypy/changeset/55261a401261/ Log: provide scalar.squeeze() diff --git 
a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -265,6 +265,9 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "view not implelemnted yet")) + def descr_squeeze(self): + return self + def descr_get_dtype(self, space): return self.get_dtype(space) @@ -524,6 +527,7 @@ conjugate = interp2app(W_GenericBox.descr_conjugate), astype = interp2app(W_GenericBox.descr_astype), view = interp2app(W_GenericBox.descr_view), + squeeze = interp2app(W_GenericBox.descr_squeeze), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -65,6 +65,13 @@ assert type(a) is np.int32 assert a == 1 + def test_squeeze(self): + import numpy as np + assert np.True_.squeeze() is np.True_ + a = np.float32(1.0) + assert a.squeeze() is a + raises(TypeError, a.squeeze, 2) + def test_attributes(self): import numpypy as np value = np.dtype('int64').type(12345) From noreply at buildbot.pypy.org Tue Nov 5 04:35:06 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 5 Nov 2013 04:35:06 +0100 (CET) Subject: [pypy-commit] pypy default: provide scalar.{strides,T} Message-ID: <20131105033506.1C51F1C1161@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67844:53130555bf48 Date: 2013-11-04 22:32 -0500 http://bitbucket.org/pypy/pypy/changeset/53130555bf48/ Log: provide scalar.{strides,T} diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -265,7 +265,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "view not implelemnted yet")) - def 
descr_squeeze(self): + def descr_self(self, space): return self def descr_get_dtype(self, space): @@ -527,13 +527,15 @@ conjugate = interp2app(W_GenericBox.descr_conjugate), astype = interp2app(W_GenericBox.descr_astype), view = interp2app(W_GenericBox.descr_view), - squeeze = interp2app(W_GenericBox.descr_squeeze), + squeeze = interp2app(W_GenericBox.descr_self), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), nbytes = GetSetProperty(W_GenericBox.descr_get_itemsize), shape = GetSetProperty(W_GenericBox.descr_get_shape), + strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), + T = GetSetProperty(W_GenericBox.descr_self), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -79,7 +79,9 @@ assert value.itemsize == 8 assert value.nbytes == 8 assert value.shape == () + assert value.strides == () assert value.ndim == 0 + assert value.T is value def test_complex_scalar_complex_cast(self): import numpy as np From noreply at buildbot.pypy.org Tue Nov 5 12:36:11 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 5 Nov 2013 12:36:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: push gc roots around stm_set_transaction_length and stm_inspect_abort_info too Message-ID: <20131105113611.3F1411C13DA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67846:4fa6e3b8b4db Date: 2013-11-05 09:55 +0100 http://bitbucket.org/pypy/pypy/changeset/4fa6e3b8b4db/ Log: push gc roots around stm_set_transaction_length and stm_inspect_abort_info too diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ 
b/rpython/memory/gctransform/stmframework.py @@ -97,11 +97,13 @@ self.default(hop) self.pop_roots(hop, livevars) - gct_stm_become_inevitable = _gct_with_roots_pushed - gct_stm_stop_all_other_threads = _gct_with_roots_pushed - gct_stm_partial_commit_and_resume_other_threads = _gct_with_roots_pushed - gct_stm_perform_transaction = _gct_with_roots_pushed - gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed + gct_stm_become_inevitable = _gct_with_roots_pushed + gct_stm_set_transaction_length = _gct_with_roots_pushed + gct_stm_stop_all_other_threads = _gct_with_roots_pushed + gct_stm_partial_commit_and_resume_other_threads = _gct_with_roots_pushed + gct_stm_perform_transaction = _gct_with_roots_pushed + gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed + gct_stm_inspect_abort_info = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): From noreply at buildbot.pypy.org Tue Nov 5 12:36:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 5 Nov 2013 12:36:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add some debug prints to stmrewrite (print ops that use the fallback to Message-ID: <20131105113612.71CFE1C13F0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67847:6823f3f7f83a Date: 2013-11-05 09:59 +0100 http://bitbucket.org/pypy/pypy/changeset/6823f3f7f83a/ Log: add some debug prints to stmrewrite (print ops that use the fallback to inevitable, intentionally or not) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -5,6 +5,8 @@ from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt from rpython.rlib.objectmodel import specialize from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.debug import (have_debug_prints, debug_start, debug_stop, + debug_print) # # STM Support @@ -38,6 +40,7 @@ def rewrite(self, 
operations): + debug_start("jit-stmrewrite-ops") # overridden method from parent class # insert_transaction_break = False @@ -175,10 +178,12 @@ continue # ---------- fall-back ---------- self.fallback_inevitable(op) + debug_print("fallback for", op.repr()) # # call_XX without guard_not_forced? assert not insert_transaction_break + debug_stop("jit-stmrewrite-ops") return self.newops def emitting_an_operation_that_can_collect(self): From noreply at buildbot.pypy.org Tue Nov 5 12:36:09 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 5 Nov 2013 12:36:09 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: make sure roots are pushed around jit_assembler_call and barriers get invalidated Message-ID: <20131105113609.EBDC31C115D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67845:06bd16504902 Date: 2013-11-04 22:28 +0100 http://bitbucket.org/pypy/pypy/changeset/06bd16504902/ Log: make sure roots are pushed around jit_assembler_call and barriers get invalidated around it and stm_partial_commit_and_resume_other_threads diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -654,6 +654,11 @@ gct_indirect_call = gct_direct_call + def gct_jit_assembler_call(self, hop): + livevars = self.push_roots(hop) + self.default(hop) + self.pop_roots(hop, livevars) + def gct_fv_gc_malloc(self, hop, flags, TYPE, *args): op = hop.spaceop PTRTYPE = op.result.concretetype diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -5,6 +5,8 @@ 'stm_commit_transaction', 'stm_begin_inevitable_transaction', 'stm_perform_transaction', + 'stm_partial_commit_and_resume_other_threads', # new priv_revision + 'jit_assembler_call', ]) diff --git a/rpython/translator/stm/test/test_writebarrier.py 
b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -556,6 +556,31 @@ assert self.barriers == ['a2i'] + def test_transaction_breaking_ops(self): + class X: + pass + x = X() + x2 = X() + + def f1(f): + x.a = x2 # write barrier + llop.stm_commit_transaction(lltype.Void) + x.a = x2 + llop.stm_begin_inevitable_transaction(lltype.Void) + x.a = x2 + llop.stm_partial_commit_and_resume_other_threads(lltype.Void) + x.a = x2 + llop.jit_assembler_call(lltype.Void) + x.a = x2 + llop.stm_perform_transaction(lltype.Void) + x.a = x2 + return x + + self.interpret(f1, [1]) + assert self.barriers == ['I2W']*6 + + + external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, _callable=lambda: None, random_effects_on_gcobjs=True, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -180,6 +180,15 @@ def op_stm_begin_inevitable_transaction(self): self.transaction_break() + def op_stm_partial_commit_and_resume_other_threads(self): + self.transaction_break() + + def op_jit_assembler_call(self): + self.transaction_break() # dummy for test_writebarrier.py + + def op_stm_perform_transaction(self): + self.transaction_break() # dummy for test_writebarrier.py + def op_gc_writebarrier(self, p): raise Exception("should have been removed") From noreply at buildbot.pypy.org Tue Nov 5 12:50:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 5 Nov 2013 12:50:24 +0100 (CET) Subject: [pypy-commit] pypy default: Rewrite string.maketrans() in a more pypy-friendly way, using bytearray(). 
Message-ID: <20131105115024.437B41D2338@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67848:5ac514ca713a Date: 2013-11-05 12:49 +0100 http://bitbucket.org/pypy/pypy/changeset/5ac514ca713a/ Log: Rewrite string.maketrans() in a more pypy-friendly way, using bytearray(). diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. + buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) From noreply at buildbot.pypy.org Tue Nov 5 16:10:46 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 5 Nov 2013 16:10:46 +0100 (CET) Subject: [pypy-commit] pypy default: optimize calling with strings of length one Message-ID: <20131105151046.991311C00F8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r67849:ddc44e469723 Date: 2013-11-05 17:09 +0200 http://bitbucket.org/pypy/pypy/changeset/ddc44e469723/ Log: optimize calling with strings of length one diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -26,7 +26,11 @@ @unwrap_spec(s=strtype) def descr_append(self, space, s): self._check_done(space) - self.builder.append(s) + if len(s) == 1: + # the same but annotated as char + self.builder.append(s[0]) + else: + self.builder.append(s) @unwrap_spec(s=strtype, start=int, end=int) def 
descr_append_slice(self, space, s, start, end): From noreply at buildbot.pypy.org Tue Nov 5 16:24:23 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 5 Nov 2013 16:24:23 +0100 (CET) Subject: [pypy-commit] pypy default: hack a little differently. test to follow Message-ID: <20131105152423.360FF1C0225@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r67850:d0546ed6ba48 Date: 2013-11-05 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/d0546ed6ba48/ Log: hack a little differently. test to follow diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,6 +4,7 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name +from rpython.rlib import jit def create_builder(name, strtype, builder_cls): @@ -23,10 +24,16 @@ def descr__new__(space, w_subtype, size=-1): return W_Builder(space, size) + @jit.unroll_safe + def _append_multiple_chars(self, s): + for c in s: + self.builder.append(c) + @unwrap_spec(s=strtype) def descr_append(self, space, s): self._check_done(space) - if len(s) == 1: + if jit.is_constant(len(s)) and len(s) < 5: + self._append_multiple_chars(s) # the same but annotated as char self.builder.append(s[0]) else: From noreply at buildbot.pypy.org Tue Nov 5 16:25:27 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 5 Nov 2013 16:25:27 +0100 (CET) Subject: [pypy-commit] pypy default: Handle the case of a copy from virtual to concrete efficiently withs trings Message-ID: <20131105152527.9CBA11C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67851:32c25a0ac64a Date: 2013-11-05 07:24 -0800 http://bitbucket.org/pypy/pypy/changeset/32c25a0ac64a/ Log: Handle the case of a copy from virtual to concrete efficiently withs trings diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5122,6 +5122,21 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_str_copy_virtual_src_concrete_dst(self): + ops = """ + [p0] + p1 = newstr(1) + strsetitem(p1, 0, 101) + copystrcontent(p1, p0, 0, 0, 1) + finish(p0) + """ + expected = """ + [p0] + strsetitem(p0, 0, 101) + finish(p0) + """ + self.optimize_strunicode_loop(ops, expected) + def test_call_pure_vstring_const(self): py.test.skip("implement me") ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -516,7 +516,6 @@ if length.is_constant() and length.box.getint() == 0: return elif ((src.is_virtual() or src.is_constant()) and - isinstance(dst, VStringPlainValue) and dst.is_virtual() and srcstart.is_constant() and dststart.is_constant() and length.is_constant()): src_start = srcstart.force_box(self).getint() @@ -527,7 +526,15 @@ assert actual_length <= MAX_CONST_LEN for index in range(actual_length): vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode) - dst.setitem(index + dst_start, vresult) + if isinstance(dst, VStringPlainValue): + dst.setitem(index + dst_start, vresult) + else: + op = ResOperation(mode.STRSETITEM, [ + op.getarg(1), + ConstInt(index + dst_start), + vresult.force_box(self), + ], None) + self.emit_operation(op) else: copy_str_content(self, src.force_box(self), @@ -752,9 +759,11 @@ def propagate_forward(self, op): dispatch_opt(self, op) + dispatch_opt = make_dispatcher_method(OptString, 'optimize_', default=OptString.emit_operation) + def _findall_call_oopspec(): prefix = 'opt_call_stroruni_' result = [] From 
noreply at buildbot.pypy.org Tue Nov 5 16:25:28 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 5 Nov 2013 16:25:28 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20131105152528.ED9F11C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67852:b51a1a27936a Date: 2013-11-05 07:24 -0800 http://bitbucket.org/pypy/pypy/changeset/b51a1a27936a/ Log: merged upstream diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,6 +4,7 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name +from rpython.rlib import jit def create_builder(name, strtype, builder_cls): @@ -23,10 +24,20 @@ def descr__new__(space, w_subtype, size=-1): return W_Builder(space, size) + @jit.unroll_safe + def _append_multiple_chars(self, s): + for c in s: + self.builder.append(c) + @unwrap_spec(s=strtype) def descr_append(self, space, s): self._check_done(space) - self.builder.append(s) + if jit.is_constant(len(s)) and len(s) < 5: + self._append_multiple_chars(s) + # the same but annotated as char + self.builder.append(s[0]) + else: + self.builder.append(s) @unwrap_spec(s=strtype, start=int, end=int) def descr_append_slice(self, space, s, start, end): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -249,6 +249,12 @@ v = self.convert_to(self.get_dtype(space)) return self.get_dtype(space).itemtype.round(v, decimals) + def descr_astype(self, space, w_dtype): + from pypy.module.micronumpy.interp_dtype import W_Dtype + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + return self.convert_to(dtype) + def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, @@ -259,6 +265,9 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "view not implelemnted yet")) + def descr_self(self, space): + return self + def descr_get_dtype(self, space): 
return self.get_dtype(space) @@ -516,12 +525,17 @@ ravel = interp2app(W_GenericBox.descr_ravel), round = interp2app(W_GenericBox.descr_round), conjugate = interp2app(W_GenericBox.descr_conjugate), + astype = interp2app(W_GenericBox.descr_astype), view = interp2app(W_GenericBox.descr_view), + squeeze = interp2app(W_GenericBox.descr_self), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), + nbytes = GetSetProperty(W_GenericBox.descr_get_itemsize), shape = GetSetProperty(W_GenericBox.descr_get_shape), + strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), + T = GetSetProperty(W_GenericBox.descr_self), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -56,13 +56,32 @@ assert b.round() == 1.0 assert b.round(decimals=5) is b + def test_astype(self): + import numpy as np + a = np.bool_(True).astype(np.float32) + assert type(a) is np.float32 + assert a == 1.0 + a = np.bool_(True).astype('int32') + assert type(a) is np.int32 + assert a == 1 + + def test_squeeze(self): + import numpy as np + assert np.True_.squeeze() is np.True_ + a = np.float32(1.0) + assert a.squeeze() is a + raises(TypeError, a.squeeze, 2) + def test_attributes(self): import numpypy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.itemsize == 8 + assert value.nbytes == 8 assert value.shape == () + assert value.strides == () assert value.ndim == 0 + assert value.T is value def test_complex_scalar_complex_cast(self): import numpy as np From noreply at buildbot.pypy.org Tue Nov 5 18:08:36 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 5 Nov 2013 18:08:36 +0100 (CET) Subject: [pypy-commit] pypy default: Speed up 
array('B', 'somestr') or my_array.fromstring() Message-ID: <20131105170836.6FB2C1C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67856:d592e1e60494 Date: 2013-11-05 09:08 -0800 http://bitbucket.org/pypy/pypy/changeset/d592e1e60494/ Log: Speed up array('B', 'somestr') or my_array.fromstring() diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,18 +1,22 @@ from __future__ import with_statement +from rpython.rlib import jit +from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rlib.rarithmetic import ovfcheck, widen +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.annlowlevel import llstr +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec, interpindirect2app from pypy.interpreter.typedef import GetSetProperty, make_weakref_descr, TypeDef -from pypy.interpreter.baseobjspace import W_Root from pypy.module._file.interp_file import W_File -from rpython.rlib import jit -from rpython.rlib.rarithmetic import ovfcheck, widen -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi from pypy.objspace.std.floatobject import W_FloatObject + @unwrap_spec(typecode=str) def w_array(space, w_cls, typecode, __args__): if len(__args__.arguments_w) > 1: @@ -234,8 +238,7 @@ new = len(s) / self.itemsize self.setlen(oldlen + new) cbuf = self._charbuf_start() - for i in range(len(s)): - cbuf[oldlen * self.itemsize + i] = s[i] + copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize), 0, len(s)) self._charbuf_stop() 
@unwrap_spec(w_f=W_File, n=int) @@ -624,7 +627,7 @@ item = unwrap(space.call_method(w_item, mytype.method)) except OperationError: msg = 'array item must be ' + mytype.unwrap[:-2] - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise OperationError(space.w_TypeError, space.wrap(msg)) else: raise if mytype.unwrap == 'bigint_w': From noreply at buildbot.pypy.org Tue Nov 5 19:33:48 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 5 Nov 2013 19:33:48 +0100 (CET) Subject: [pypy-commit] pypy default: Speed up StringBuilder.append_charpsize Message-ID: <20131105183348.C0CD61C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67857:d9e23b12374a Date: 2013-11-05 10:33 -0800 http://bitbucket.org/pypy/pypy/changeset/d9e23b12374a/ Log: Speed up StringBuilder.append_charpsize diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py --- a/rpython/rtyper/lltypesystem/rbuilder.py +++ b/rpython/rtyper/lltypesystem/rbuilder.py @@ -39,16 +39,24 @@ rstr.copy_unicode_contents) STRINGBUILDER = lltype.GcStruct('stringbuilder', - ('allocated', lltype.Signed), - ('used', lltype.Signed), - ('buf', lltype.Ptr(STR)), - adtmeths={'grow':staticAdtMethod(stringbuilder_grow)}) + ('allocated', lltype.Signed), + ('used', lltype.Signed), + ('buf', lltype.Ptr(STR)), + adtmeths={ + 'grow': staticAdtMethod(stringbuilder_grow), + 'copy_raw_to_string': staticAdtMethod(rstr.copy_raw_to_string), + } +) UNICODEBUILDER = lltype.GcStruct('unicodebuilder', - ('allocated', lltype.Signed), - ('used', lltype.Signed), - ('buf', lltype.Ptr(UNICODE)), - adtmeths={'grow':staticAdtMethod(unicodebuilder_grow)}) + ('allocated', lltype.Signed), + ('used', lltype.Signed), + ('buf', lltype.Ptr(UNICODE)), + adtmeths={ + 'grow': staticAdtMethod(unicodebuilder_grow), + 'copy_raw_to_string': staticAdtMethod(rstr.copy_raw_to_unicode), + } +) MAX = 16*1024*1024 @@ -109,10 +117,8 @@ used = ll_builder.used if used + size > 
ll_builder.allocated: ll_builder.grow(ll_builder, size) - for i in xrange(size): - ll_builder.buf.chars[used] = charp[i] - used += 1 - ll_builder.used = used + ll_builder.copy_raw_to_string(charp, ll_builder.buf, used, size) + ll_builder.used += size @staticmethod def ll_getlength(ll_builder): diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -106,10 +106,23 @@ copy_string_to_raw._always_inline_ = True copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) - return copy_string_to_raw, copy_string_contents + @jit.oopspec('stroruni.copy_raw_to_string(ptrsrc, dst, dststart, length)') + def copy_raw_to_string(ptrsrc, dst, dststart, length): + # xxx Warning: same note as above apply: don't do this at home + assert length >= 0 + # from here, no GC operations can happen + dst = _get_raw_buf(SRC_TP, dst, dststart) + adr = llmemory.cast_ptr_to_adr(ptrsrc) -copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') -copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, + srcbuf = adr + llmemory.itemoffsetof(typeOf(ptrsrc).TO, 0) + llmemory.raw_memcopy(srcbuf, dst, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section + keepalive_until_here(dst) + + return copy_string_to_raw, copy_raw_to_string, copy_string_contents + +copy_string_to_raw, copy_raw_to_string, copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') +copy_unicode_to_raw, copy_raw_to_unicode, copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() From noreply at buildbot.pypy.org Tue Nov 5 20:00:38 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 5 Nov 2013 20:00:38 +0100 (CET) Subject: [pypy-commit] pypy default: translation fix Message-ID: 
<20131105190038.99EDF1C01F3@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67858:ab54e581fc29 Date: 2013-11-05 10:59 -0800 http://bitbucket.org/pypy/pypy/changeset/ab54e581fc29/ Log: translation fix diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -106,7 +106,7 @@ copy_string_to_raw._always_inline_ = True copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) - @jit.oopspec('stroruni.copy_raw_to_string(ptrsrc, dst, dststart, length)') + @jit.dont_look_inside def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 @@ -118,6 +118,9 @@ llmemory.raw_memcopy(srcbuf, dst, llmemory.sizeof(CHAR_TP) * length) # end of "no GC" section keepalive_until_here(dst) + copy_raw_to_string._always_inline_ = True + copy_raw_to_string = func_with_new_name(copy_raw_to_string, + 'copy_raw_to_%s' % name) return copy_string_to_raw, copy_raw_to_string, copy_string_contents From noreply at buildbot.pypy.org Tue Nov 5 20:37:39 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 5 Nov 2013 20:37:39 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: make the nightly template look a bit more like the rest of the buildbot pages Message-ID: <20131105193739.D62AD1C0225@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: add-header-to-nightly Changeset: r876:d444c93bb7b3 Date: 2013-11-05 20:36 +0100 http://bitbucket.org/pypy/buildbot/changeset/d444c93bb7b3/ Log: make the nightly template look a bit more like the rest of the buildbot pages diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -157,7 +157,9 @@ pass class PyPyDirectoryLister(DirectoryLister): - template = """ + template = """ + %%(header)s @@ -189,7 +191,6 
@@ } body { border: 0; padding: 0; margin: 0; background-color: #efefef; } -h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;} td,th {padding-left: 0.5em; padding-right: 0.5em; } @@ -217,23 +218,25 @@ - About -

%%(header)s

+
+
+

%%(header)s

- - - - - - - - - - - -%%(tableContent)s - -
FilenameSizeDateown testsapplevel tests
- + + + + + + + + + + + + %%(tableContent)s + +
FilenameSizeDateown testsapplevel tests
+
""" From noreply at buildbot.pypy.org Tue Nov 5 22:04:36 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 5 Nov 2013 22:04:36 +0100 (CET) Subject: [pypy-commit] stmgc default: fix impersonating the other thread when forcing minor collections in other threads. Message-ID: <20131105210436.625451C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r544:8a3b7748ba7f Date: 2013-11-05 22:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/8a3b7748ba7f/ Log: fix impersonating the other thread when forcing minor collections in other threads. also use the direct thread locals in some places (for better or worse). diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -31,7 +31,7 @@ # gcc address sanitizer: -fPIE -pie -fsanitize=address -lasan -fno-omit-frame-pointer debug-%: %.c ${H_FILES} ${C_FILES} - gcc -pthread -DDUMP_EXTRA=1 ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt + gcc -Wall -pthread -DDUMP_EXTRA=1 ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt release-%: %.c ${H_FILES} ${C_FILES} stmgc.c gcc -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c -lrt diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -705,7 +705,7 @@ } struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref >= 1); + assert(stm_active >= 1); /* We need the collection_lock for the sequel; this is required notably because we're about to edit flags on a protected object. 
@@ -889,7 +889,7 @@ void SpinLoop(int num) { struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref >= 1); + assert(stm_active >= 1); assert(num < SPINLOOP_REASONS); d->num_spinloops[num]++; smp_spinloop(); @@ -924,7 +924,7 @@ assert(!stm_has_got_any_lock(d)); } - assert(*d->active_ref != 0); + assert(stm_active != 0); assert(!is_inevitable(d)); assert(num < ABORT_REASONS); d->num_aborts[num]++; @@ -989,7 +989,7 @@ SpinLoop(SPLP_ABORT); /* make the transaction no longer active */ - *d->active_ref = 0; + stm_active = 0; d->atomic = 0; /* release the lock */ @@ -1043,10 +1043,10 @@ void AbortNowIfDelayed(void) { struct tx_descriptor *d = thread_descriptor; - if (*d->active_ref < 0) + if (stm_active < 0) { - int reason = -*d->active_ref; - *d->active_ref = 1; + int reason = -stm_active; + stm_active = 1; AbortTransaction(reason); } } @@ -1098,7 +1098,7 @@ { struct tx_descriptor *d = thread_descriptor; init_transaction(d, 0); - *d->active_ref = 1; + stm_active = 1; d->setjmp_buf = buf; d->longjmp_callback = longjmp_callback; d->old_thread_local_obj = stm_thread_local_obj; @@ -1508,7 +1508,7 @@ spinlock_release(d->public_descriptor->collection_lock); d->num_commits++; - *d->active_ref = 0; + stm_active = 0; if (!stay_inevitable) stm_stop_sharedlock(); @@ -1550,7 +1550,7 @@ { /* must save roots around this call */ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; - if (d == NULL || *d->active_ref != 1) + if (d == NULL || stm_active != 1) return; /* I am already inevitable, or not in a transaction at all (XXX statically we should know when we're outside a transaction) */ @@ -1761,11 +1761,15 @@ assert(d->my_lock & 1); assert(d->my_lock >= LOCKED); stm_private_rev_num = -d->my_lock; + /* Attention: in the following, we add references to real thread-locals + to the thread_descriptor. Make sure that force_minor_collections() + fakes all of them when doing minor collections in other threads! 
*/ d->active_ref = &stm_active; d->nursery_current_ref = &stm_nursery_current; d->nursery_nextlimit_ref = &stm_nursery_nextlimit; d->private_revision_ref = &stm_private_rev_num; d->read_barrier_cache_ref = &stm_read_barrier_cache; + stm_thread_local_obj = NULL; d->thread_local_obj_ref = &stm_thread_local_obj; d->max_aborts = -1; diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -25,7 +25,7 @@ void stm_call_on_abort(void *key, void callback(void *)) { struct tx_descriptor *d = thread_descriptor; - if (d == NULL || *d->active_ref != 1) + if (d == NULL || stm_active != 1) return; /* ignore callbacks if we're outside a transaction or in an inevitable transaction (which cannot abort) */ if (callback == NULL) { diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -919,9 +919,15 @@ struct tx_descriptor *saved = thread_descriptor; revision_t saved_private_rev = stm_private_rev_num; char *saved_read_barrier_cache = stm_read_barrier_cache; + int saved_active = stm_active; + char *saved_nursery_current = stm_nursery_current; + char *saved_nursery_nextlimit = stm_nursery_nextlimit; assert(saved_private_rev == *saved->private_revision_ref); assert(saved_read_barrier_cache == *saved->read_barrier_cache_ref); + assert(saved_active == *saved->active_ref); + assert(saved_nursery_current == *saved->nursery_current_ref); + assert(saved_nursery_nextlimit == *saved->nursery_nextlimit_ref); for (d = stm_tx_head; d; d = d->tx_next) { /* Force a minor collection to run in the thread 'd'. @@ -933,20 +939,49 @@ /* Hack: temporarily pretend that we "are" the other thread... 
*/ assert(d->shadowstack_end_ref && *d->shadowstack_end_ref); - thread_descriptor = d; - stm_private_rev_num = *d->private_revision_ref; + /* set thread locals to expected values */ + thread_descriptor = d; + stm_private_rev_num = *d->private_revision_ref; stm_read_barrier_cache = *d->read_barrier_cache_ref; + stm_active = *d->active_ref; + stm_nursery_current = *d->nursery_current_ref; + stm_nursery_nextlimit = *d->nursery_nextlimit_ref; + /* save, then point _refs to the new thread-locals */ + revision_t *d_private_revision_ref = d->private_revision_ref; + char **d_read_barrier_cache_ref = d->read_barrier_cache_ref; + int *d_active_ref = d->active_ref; + char **d_nursery_current_ref = d->nursery_current_ref; + char **d_nursery_nextlimit_ref = d->nursery_nextlimit_ref; + d->private_revision_ref = &stm_private_rev_num; + d->read_barrier_cache_ref = &stm_read_barrier_cache; + d->active_ref = &stm_active; + d->nursery_current_ref = &stm_nursery_current; + d->nursery_nextlimit_ref = &stm_nursery_nextlimit; + /* we impersonated the other thread. */ stmgc_minor_collect_no_abort(); - assert(stm_private_rev_num == *d->private_revision_ref); - *d->read_barrier_cache_ref = stm_read_barrier_cache; - - thread_descriptor = saved; - stm_private_rev_num = saved_private_rev; - stm_read_barrier_cache = saved_read_barrier_cache; + /* priv_rev didn't change! 
others may have */ + assert(*d_private_revision_ref == stm_private_rev_num); + *d_read_barrier_cache_ref = stm_read_barrier_cache; + *d_active_ref = stm_active; + *d_nursery_current_ref = stm_nursery_current; + *d_nursery_nextlimit_ref = stm_nursery_nextlimit; + /* restore _ref pointers in other thread */ + d->private_revision_ref = d_private_revision_ref; + d->read_barrier_cache_ref = d_read_barrier_cache_ref; + d->active_ref = d_active_ref; + d->nursery_current_ref = d_nursery_current_ref; + d->nursery_nextlimit_ref = d_nursery_nextlimit_ref; } } + /* restore current thread */ + thread_descriptor = saved; + stm_private_rev_num = saved_private_rev; + stm_read_barrier_cache = saved_read_barrier_cache; + stm_active = saved_active; + stm_nursery_current = saved_nursery_current; + stm_nursery_nextlimit = saved_nursery_nextlimit; stmgc_minor_collect_no_abort(); } diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -36,8 +36,8 @@ assert(d->nursery_base == NULL); d->nursery_base = stm_malloc(GC_NURSERY); /* start of nursery */ d->nursery_end = d->nursery_base + GC_NURSERY; /* end of nursery */ - *d->nursery_current_ref = d->nursery_base; /* current position */ - *d->nursery_nextlimit_ref = d->nursery_base; /* next section limit */ + stm_nursery_current = d->nursery_base; /* current position */ + stm_nursery_nextlimit = d->nursery_base; /* next section limit */ d->nursery_cleared = NC_REGULAR; dprintf(("minor: nursery is at [%p to %p]\n", d->nursery_base, @@ -63,7 +63,7 @@ void stmgc_minor_collect_soon(void) { struct tx_descriptor *d = thread_descriptor; - *d->nursery_current_ref = d->nursery_end; + stm_nursery_current = d->nursery_end; } inline static gcptr allocate_nursery(size_t size, revision_t tid) @@ -71,11 +71,11 @@ /* if 'tid == -1', we must not collect */ struct tx_descriptor *d = thread_descriptor; gcptr P; - char *cur = *d->nursery_current_ref; + char *cur = stm_nursery_current; char *end = cur + size; assert((size & 3) == 0); - 
*d->nursery_current_ref = end; - if (end > *d->nursery_nextlimit_ref) { + stm_nursery_current = end; + if (end > stm_nursery_nextlimit) { P = allocate_next_section(size, tid); } else { @@ -592,7 +592,7 @@ First fix 'nursery_current', left to a bogus value by the caller. */ struct tx_descriptor *d = thread_descriptor; - *d->nursery_current_ref -= allocate_size; + stm_nursery_current -= allocate_size; /* Are we asking for a "reasonable" number of bytes, i.e. a value at most equal to one section? @@ -612,8 +612,8 @@ } /* Are we at the end of the nursery? */ - if (*d->nursery_nextlimit_ref == d->nursery_end || - *d->nursery_current_ref == d->nursery_end) { // stmgc_minor_collect_soon() + if (stm_nursery_nextlimit == d->nursery_end || + stm_nursery_current == d->nursery_end) { // stmgc_minor_collect_soon() /* Yes */ if (tid == -1) return NULL; /* cannot collect */ @@ -629,12 +629,12 @@ /* Clear the next section */ if (d->nursery_cleared != NC_ALREADY_CLEARED) - memset(*d->nursery_nextlimit_ref, 0, GC_NURSERY_SECTION); - *d->nursery_nextlimit_ref += GC_NURSERY_SECTION; + memset(stm_nursery_nextlimit, 0, GC_NURSERY_SECTION); + stm_nursery_nextlimit += GC_NURSERY_SECTION; /* Return the object from there */ - gcptr P = (gcptr)(*d->nursery_current_ref); - *d->nursery_current_ref += allocate_size; + gcptr P = (gcptr)(stm_nursery_current); + stm_nursery_current += allocate_size; assert(*d->nursery_current_ref <= *d->nursery_nextlimit_ref); P->h_tid = tid; diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -135,7 +135,7 @@ /* change the default transaction length, and ask if now would be a good time to break the transaction (by returning from the 'callback' above with a positive value). */ -void stm_set_transaction_length(long length_max); +void stm_set_transaction_length(long length_max); /* save roots! */ _Bool stm_should_break_transaction(void); /* change the atomic counter by 'delta' and return the new value. 
Used @@ -162,7 +162,7 @@ stm_inspect_abort_info(). (XXX details not documented yet) */ void stm_abort_info_push(gcptr obj, long fieldoffsets[]); void stm_abort_info_pop(long count); -char *stm_inspect_abort_info(void); /* turns inevitable */ +char *stm_inspect_abort_info(void); /* turns inevitable, push roots! */ /* mostly for debugging support */ void stm_abort_and_retry(void); diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -9,7 +9,7 @@ static revision_t sync_required = 0; void stm_set_transaction_length(long length_max) -{ +{ /* save roots around this call! */ BecomeInevitable("set_transaction_length"); if (length_max <= 0) { length_max = 1; @@ -42,7 +42,7 @@ d->reads_size_limit_nonatomic)); /* if is_inevitable(), reads_size_limit_nonatomic should be 0 (and thus reads_size_limit too, if !d->atomic.) */ - if (*d->active_ref == 2) + if (stm_active == 2) assert(d->reads_size_limit_nonatomic == 0); #endif @@ -167,7 +167,7 @@ has configured 'reads_size_limit_nonatomic' to a smaller value. When such a shortened transaction succeeds, the next one will see its length limit doubled, up to the maximum. */ - if (counter == 0 && *d->active_ref != 2) { + if (counter == 0 && stm_active != 2) { unsigned long limit = d->reads_size_limit_nonatomic; if (limit != 0 && limit < (stm_regular_length_limit >> 1)) limit = (limit << 1) | 1; @@ -182,7 +182,7 @@ /* atomic transaction: a common case is that callback() returned even though we are atomic because we need a major GC. For that case, release and reaquire the rw lock here. 
*/ - assert(*d->active_ref >= 1); + assert(stm_active >= 1); stm_possible_safe_point(); } @@ -217,7 +217,7 @@ { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; if (d->atomic) { - assert(*d->active_ref >= 1); + assert(stm_active >= 1); stm_possible_safe_point(); } else { @@ -266,7 +266,7 @@ int stm_in_transaction(void) { struct tx_descriptor *d = thread_descriptor; - return d && *d->active_ref; + return d && stm_active; } /************************************************************/ @@ -336,7 +336,7 @@ void stm_partial_commit_and_resume_other_threads(void) { /* push gc roots! */ struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref == 2); + assert(stm_active == 2); int atomic = d->atomic; /* Give up atomicity during commit. This still works because @@ -390,7 +390,7 @@ /* Warning, may block waiting for rwlock_in_transaction while another thread runs a major GC */ - assert(*thread_descriptor->active_ref); + assert(stm_active); assert(in_single_thread != thread_descriptor); stm_stop_sharedlock(); From noreply at buildbot.pypy.org Tue Nov 5 22:07:16 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 5 Nov 2013 22:07:16 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: import stmgc (contains important fix regarding new thread locals) Message-ID: <20131105210716.C7CFD1C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67859:01c1a87d3707 Date: 2013-11-05 22:05 +0100 http://bitbucket.org/pypy/pypy/changeset/01c1a87d3707/ Log: import stmgc (contains important fix regarding new thread locals) diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -706,7 +706,7 @@ } struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref >= 1); + assert(stm_active >= 1); /* We need the collection_lock for the sequel; this is required notably because we're about to edit 
flags on a protected object. @@ -890,7 +890,7 @@ void SpinLoop(int num) { struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref >= 1); + assert(stm_active >= 1); assert(num < SPINLOOP_REASONS); d->num_spinloops[num]++; smp_spinloop(); @@ -925,7 +925,7 @@ assert(!stm_has_got_any_lock(d)); } - assert(*d->active_ref != 0); + assert(stm_active != 0); assert(!is_inevitable(d)); assert(num < ABORT_REASONS); d->num_aborts[num]++; @@ -990,7 +990,7 @@ SpinLoop(SPLP_ABORT); /* make the transaction no longer active */ - *d->active_ref = 0; + stm_active = 0; d->atomic = 0; /* release the lock */ @@ -1044,10 +1044,10 @@ void AbortNowIfDelayed(void) { struct tx_descriptor *d = thread_descriptor; - if (*d->active_ref < 0) + if (stm_active < 0) { - int reason = -*d->active_ref; - *d->active_ref = 1; + int reason = -stm_active; + stm_active = 1; AbortTransaction(reason); } } @@ -1099,7 +1099,7 @@ { struct tx_descriptor *d = thread_descriptor; init_transaction(d, 0); - *d->active_ref = 1; + stm_active = 1; d->setjmp_buf = buf; d->longjmp_callback = longjmp_callback; d->old_thread_local_obj = stm_thread_local_obj; @@ -1509,7 +1509,7 @@ spinlock_release(d->public_descriptor->collection_lock); d->num_commits++; - *d->active_ref = 0; + stm_active = 0; if (!stay_inevitable) stm_stop_sharedlock(); @@ -1551,7 +1551,7 @@ { /* must save roots around this call */ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; - if (d == NULL || *d->active_ref != 1) + if (d == NULL || stm_active != 1) return; /* I am already inevitable, or not in a transaction at all (XXX statically we should know when we're outside a transaction) */ @@ -1762,11 +1762,15 @@ assert(d->my_lock & 1); assert(d->my_lock >= LOCKED); stm_private_rev_num = -d->my_lock; + /* Attention: in the following, we add references to real thread-locals + to the thread_descriptor. Make sure that force_minor_collections() + fakes all of them when doing minor collections in other threads! 
*/ d->active_ref = &stm_active; d->nursery_current_ref = &stm_nursery_current; d->nursery_nextlimit_ref = &stm_nursery_nextlimit; d->private_revision_ref = &stm_private_rev_num; d->read_barrier_cache_ref = &stm_read_barrier_cache; + stm_thread_local_obj = NULL; d->thread_local_obj_ref = &stm_thread_local_obj; d->max_aborts = -1; diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -26,7 +26,7 @@ void stm_call_on_abort(void *key, void callback(void *)) { struct tx_descriptor *d = thread_descriptor; - if (d == NULL || *d->active_ref != 1) + if (d == NULL || stm_active != 1) return; /* ignore callbacks if we're outside a transaction or in an inevitable transaction (which cannot abort) */ if (callback == NULL) { diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -920,9 +920,15 @@ struct tx_descriptor *saved = thread_descriptor; revision_t saved_private_rev = stm_private_rev_num; char *saved_read_barrier_cache = stm_read_barrier_cache; + int saved_active = stm_active; + char *saved_nursery_current = stm_nursery_current; + char *saved_nursery_nextlimit = stm_nursery_nextlimit; assert(saved_private_rev == *saved->private_revision_ref); assert(saved_read_barrier_cache == *saved->read_barrier_cache_ref); + assert(saved_active == *saved->active_ref); + assert(saved_nursery_current == *saved->nursery_current_ref); + assert(saved_nursery_nextlimit == *saved->nursery_nextlimit_ref); for (d = stm_tx_head; d; d = d->tx_next) { /* Force a minor collection to run in the thread 'd'. @@ -934,20 +940,49 @@ /* Hack: temporarily pretend that we "are" the other thread... 
*/ assert(d->shadowstack_end_ref && *d->shadowstack_end_ref); - thread_descriptor = d; - stm_private_rev_num = *d->private_revision_ref; + /* set thread locals to expected values */ + thread_descriptor = d; + stm_private_rev_num = *d->private_revision_ref; stm_read_barrier_cache = *d->read_barrier_cache_ref; + stm_active = *d->active_ref; + stm_nursery_current = *d->nursery_current_ref; + stm_nursery_nextlimit = *d->nursery_nextlimit_ref; + /* save, then point _refs to the new thread-locals */ + revision_t *d_private_revision_ref = d->private_revision_ref; + char **d_read_barrier_cache_ref = d->read_barrier_cache_ref; + int *d_active_ref = d->active_ref; + char **d_nursery_current_ref = d->nursery_current_ref; + char **d_nursery_nextlimit_ref = d->nursery_nextlimit_ref; + d->private_revision_ref = &stm_private_rev_num; + d->read_barrier_cache_ref = &stm_read_barrier_cache; + d->active_ref = &stm_active; + d->nursery_current_ref = &stm_nursery_current; + d->nursery_nextlimit_ref = &stm_nursery_nextlimit; + /* we impersonated the other thread. */ stmgc_minor_collect_no_abort(); - assert(stm_private_rev_num == *d->private_revision_ref); - *d->read_barrier_cache_ref = stm_read_barrier_cache; - - thread_descriptor = saved; - stm_private_rev_num = saved_private_rev; - stm_read_barrier_cache = saved_read_barrier_cache; + /* priv_rev didn't change! 
others may have */ + assert(*d_private_revision_ref == stm_private_rev_num); + *d_read_barrier_cache_ref = stm_read_barrier_cache; + *d_active_ref = stm_active; + *d_nursery_current_ref = stm_nursery_current; + *d_nursery_nextlimit_ref = stm_nursery_nextlimit; + /* restore _ref pointers in other thread */ + d->private_revision_ref = d_private_revision_ref; + d->read_barrier_cache_ref = d_read_barrier_cache_ref; + d->active_ref = d_active_ref; + d->nursery_current_ref = d_nursery_current_ref; + d->nursery_nextlimit_ref = d_nursery_nextlimit_ref; } } + /* restore current thread */ + thread_descriptor = saved; + stm_private_rev_num = saved_private_rev; + stm_read_barrier_cache = saved_read_barrier_cache; + stm_active = saved_active; + stm_nursery_current = saved_nursery_current; + stm_nursery_nextlimit = saved_nursery_nextlimit; stmgc_minor_collect_no_abort(); } diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -37,8 +37,8 @@ assert(d->nursery_base == NULL); d->nursery_base = stm_malloc(GC_NURSERY); /* start of nursery */ d->nursery_end = d->nursery_base + GC_NURSERY; /* end of nursery */ - *d->nursery_current_ref = d->nursery_base; /* current position */ - *d->nursery_nextlimit_ref = d->nursery_base; /* next section limit */ + stm_nursery_current = d->nursery_base; /* current position */ + stm_nursery_nextlimit = d->nursery_base; /* next section limit */ d->nursery_cleared = NC_REGULAR; dprintf(("minor: nursery is at [%p to %p]\n", d->nursery_base, @@ -64,7 +64,7 @@ void stmgc_minor_collect_soon(void) { struct tx_descriptor *d = thread_descriptor; - *d->nursery_current_ref = d->nursery_end; + stm_nursery_current = d->nursery_end; } inline static gcptr allocate_nursery(size_t size, revision_t tid) @@ -72,11 +72,11 @@ /* if 'tid == -1', we must not collect */ struct tx_descriptor *d = thread_descriptor; gcptr P; - char *cur = 
*d->nursery_current_ref; + char *cur = stm_nursery_current; char *end = cur + size; assert((size & 3) == 0); - *d->nursery_current_ref = end; - if (end > *d->nursery_nextlimit_ref) { + stm_nursery_current = end; + if (end > stm_nursery_nextlimit) { P = allocate_next_section(size, tid); } else { @@ -593,7 +593,7 @@ First fix 'nursery_current', left to a bogus value by the caller. */ struct tx_descriptor *d = thread_descriptor; - *d->nursery_current_ref -= allocate_size; + stm_nursery_current -= allocate_size; /* Are we asking for a "reasonable" number of bytes, i.e. a value at most equal to one section? @@ -613,8 +613,8 @@ } /* Are we at the end of the nursery? */ - if (*d->nursery_nextlimit_ref == d->nursery_end || - *d->nursery_current_ref == d->nursery_end) { // stmgc_minor_collect_soon() + if (stm_nursery_nextlimit == d->nursery_end || + stm_nursery_current == d->nursery_end) { // stmgc_minor_collect_soon() /* Yes */ if (tid == -1) return NULL; /* cannot collect */ @@ -630,12 +630,12 @@ /* Clear the next section */ if (d->nursery_cleared != NC_ALREADY_CLEARED) - memset(*d->nursery_nextlimit_ref, 0, GC_NURSERY_SECTION); - *d->nursery_nextlimit_ref += GC_NURSERY_SECTION; + memset(stm_nursery_nextlimit, 0, GC_NURSERY_SECTION); + stm_nursery_nextlimit += GC_NURSERY_SECTION; /* Return the object from there */ - gcptr P = (gcptr)(*d->nursery_current_ref); - *d->nursery_current_ref += allocate_size; + gcptr P = (gcptr)(stm_nursery_current); + stm_nursery_current += allocate_size; assert(*d->nursery_current_ref <= *d->nursery_nextlimit_ref); P->h_tid = tid; diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -79aa5685d286 +8a3b7748ba7f diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ 
-136,7 +136,7 @@ /* change the default transaction length, and ask if now would be a good time to break the transaction (by returning from the 'callback' above with a positive value). */ -void stm_set_transaction_length(long length_max); +void stm_set_transaction_length(long length_max); /* save roots! */ _Bool stm_should_break_transaction(void); /* change the atomic counter by 'delta' and return the new value. Used @@ -163,7 +163,7 @@ stm_inspect_abort_info(). (XXX details not documented yet) */ void stm_abort_info_push(gcptr obj, long fieldoffsets[]); void stm_abort_info_pop(long count); -char *stm_inspect_abort_info(void); /* turns inevitable */ +char *stm_inspect_abort_info(void); /* turns inevitable, push roots! */ /* mostly for debugging support */ void stm_abort_and_retry(void); diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -10,7 +10,7 @@ static revision_t sync_required = 0; void stm_set_transaction_length(long length_max) -{ +{ /* save roots around this call! */ BecomeInevitable("set_transaction_length"); if (length_max <= 0) { length_max = 1; @@ -43,7 +43,7 @@ d->reads_size_limit_nonatomic)); /* if is_inevitable(), reads_size_limit_nonatomic should be 0 (and thus reads_size_limit too, if !d->atomic.) */ - if (*d->active_ref == 2) + if (stm_active == 2) assert(d->reads_size_limit_nonatomic == 0); #endif @@ -168,7 +168,7 @@ has configured 'reads_size_limit_nonatomic' to a smaller value. When such a shortened transaction succeeds, the next one will see its length limit doubled, up to the maximum. 
*/ - if (counter == 0 && *d->active_ref != 2) { + if (counter == 0 && stm_active != 2) { unsigned long limit = d->reads_size_limit_nonatomic; if (limit != 0 && limit < (stm_regular_length_limit >> 1)) limit = (limit << 1) | 1; @@ -183,7 +183,7 @@ /* atomic transaction: a common case is that callback() returned even though we are atomic because we need a major GC. For that case, release and reaquire the rw lock here. */ - assert(*d->active_ref >= 1); + assert(stm_active >= 1); stm_possible_safe_point(); } @@ -218,7 +218,7 @@ { /* must save roots around this call */ struct tx_descriptor *d = thread_descriptor; if (d->atomic) { - assert(*d->active_ref >= 1); + assert(stm_active >= 1); stm_possible_safe_point(); } else { @@ -267,7 +267,7 @@ int stm_in_transaction(void) { struct tx_descriptor *d = thread_descriptor; - return d && *d->active_ref; + return d && stm_active; } /************************************************************/ @@ -337,7 +337,7 @@ void stm_partial_commit_and_resume_other_threads(void) { /* push gc roots! */ struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref == 2); + assert(stm_active == 2); int atomic = d->atomic; /* Give up atomicity during commit. 
This still works because @@ -391,7 +391,7 @@ /* Warning, may block waiting for rwlock_in_transaction while another thread runs a major GC */ - assert(*thread_descriptor->active_ref); + assert(stm_active); assert(in_single_thread != thread_descriptor); stm_stop_sharedlock(); From noreply at buildbot.pypy.org Tue Nov 5 23:52:18 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 5 Nov 2013 23:52:18 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: use template-based rendering for nightly directory listing Message-ID: <20131105225218.754311C00F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r877:834aa45684d4 Date: 2013-11-06 00:51 +0200 http://bitbucket.org/pypy/buildbot/changeset/834aa45684d4/ Log: use template-based rendering for nightly directory listing diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -5,8 +5,8 @@ import cgi import urllib import sys -from twisted.web.static import File, DirectoryLister -from buildbot.status.web.base import path_to_root +from twisted.web.static import File +from buildbot.status.web.base import DirectoryLister class PyPyTarball(object): @@ -143,10 +143,9 @@ names = File.listNames(self) if is_pypy_dir(names): names = self.sortBuildNames(names) - Listener = PyPyDirectoryLister else: names = self.sortDirectoryNames(File.listEntities(self)) - Listener = PyPyDirectoryLister + Listener = PyPyDirectoryLister return Listener(self.path, names, self.contentTypes, @@ -157,112 +156,16 @@ pass class PyPyDirectoryLister(DirectoryLister): - template = """ - - -%%(header)s - - - - - - - -
- Home - - - - Speed - Summary (trunk) - Summary - Nightly builds - - - Waterfall - - - - Builders - - - - - - About -
-
-
-

%%(header)s

- - - - - - - - - - - - - %%(tableContent)s - -
FilenameSizeDateown testsapplevel tests
-
- - -""" - - linePattern = """ - %(text)s - %(size)s - %(date)s - %(own_summary)s - %(app_summary)s - -""" - - def render(self, request): - self.status = request.site.buildbot_service.getStatus() - self.template = self.template % {'path_to_root': path_to_root(request)} - return DirectoryLister.render(self, request) - - def _buildTableContent(self, elements): - tableContent = [] + def _getFilesAndDirectories(self, directory): + dirs, files = DirectoryLister._getFilesAndDirectories(self, directory) rowClasses = itertools.cycle(['odd', 'even']) - for element, rowClass in zip(elements, rowClasses): - element["class"] = rowClass - self._add_test_results(element, rowClass) - tableContent.append(self.linePattern % element) - return tableContent + for f, rowClass in zip(files, rowClasses): + f["class"] = rowClass + self._add_test_results(f, rowClass) + return dirs, files def _add_test_results(self, element, rowClass): filename = urllib.unquote(element['href']) diff --git a/master/templates/directory.html b/master/templates/directory.html new file mode 100644 --- /dev/null +++ b/master/templates/directory.html @@ -0,0 +1,71 @@ +{% extends "layout.html" %} +{% block morehead %} + + +{% endblock %} + +{% block content %} + +

Directory listing for {{ path }}

+ +{% set row_class = cycler('odd', 'even') %} + + + + + + + + + + + +{% for d in directories %} + + + + + + +{% endfor %} + +{% for f in files %} + + + + + + + +{% endfor %} +
FilenameSizeDateown testsapplevel tests
{{ d.text }}{{ d.size }}{{ d.type }}{{ d.encoding }}
{{ f.text }}{{ f.size }}{{ f.date }}{{ f.own_summary }}{{ f.app_summary }}
+ +{% endblock %} From noreply at buildbot.pypy.org Wed Nov 6 02:27:49 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 6 Nov 2013 02:27:49 +0100 (CET) Subject: [pypy-commit] pypy default: fix Message-ID: <20131106012749.E654C1C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67860:259e6bb918cf Date: 2013-11-05 17:27 -0800 http://bitbucket.org/pypy/pypy/changeset/259e6bb918cf/ Log: fix diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -8,10 +8,10 @@ def mallocbytearray(size): return lltype.malloc(BYTEARRAY, size) -_, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, +_, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, lltype.Char, 'bytearray') -_, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, +_, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, BYTEARRAY, lltype.Char, 'bytearray_from_str') From noreply at buildbot.pypy.org Wed Nov 6 02:32:13 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 6 Nov 2013 02:32:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix this assertion Message-ID: <20131106013213.6230B1C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67861:20b7b762dbed Date: 2013-11-05 17:31 -0800 http://bitbucket.org/pypy/pypy/changeset/20b7b762dbed/ Log: fix this assertion diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -517,13 +517,11 @@ return elif ((src.is_virtual() or src.is_constant()) and srcstart.is_constant() and dststart.is_constant() and - length.is_constant()): + length.is_constant() and + (length.force_box(self).getint() < 20 or (src.is_virtual() 
and dst.is_virtual()))): src_start = srcstart.force_box(self).getint() dst_start = dststart.force_box(self).getint() - # 'length' must be <= MAX_CONST_LEN here, because 'dst' is a - # VStringPlainValue, which is limited to MAX_CONST_LEN. actual_length = length.force_box(self).getint() - assert actual_length <= MAX_CONST_LEN for index in range(actual_length): vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode) if isinstance(dst, VStringPlainValue): From noreply at buildbot.pypy.org Wed Nov 6 07:31:42 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 6 Nov 2013 07:31:42 +0100 (CET) Subject: [pypy-commit] pypy default: Handle weird indices here, don't try to pass length = -1 down to append_charpsize Message-ID: <20131106063142.CB3C91C0175@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67862:cab5c44a5677 Date: 2013-11-05 22:31 -0800 http://bitbucket.org/pypy/pypy/changeset/cab5c44a5677/ Log: Handle weird indices here, don't try to pass length = -1 down to append_charpsize diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -163,6 +163,8 @@ if step == 0: # index only return space.wrap(self.mmap.getitem(start)) elif step == 1: + if stop - start < 0: + return space.wrap("") return space.wrap(self.mmap.getslice(start, stop - start)) else: res = "".join([self.mmap.getitem(i) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -525,6 +525,8 @@ m = mmap(f.fileno(), 6) assert m[-3:7] == "bar" + assert m[1:0:1] == "" + f.close() def test_sequence_type(self): diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -371,6 +371,7 @@ self._grow(times) def append_charpsize(self, s, size): + assert size >= 0 l = [] for i 
in xrange(size): l.append(s[i]) From noreply at buildbot.pypy.org Wed Nov 6 11:09:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 6 Nov 2013 11:09:10 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix Message-ID: <20131106100910.9C2531C13DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67863:bed2204ab3ba Date: 2013-11-06 11:07 +0100 http://bitbucket.org/pypy/pypy/changeset/bed2204ab3ba/ Log: Test and fix diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5137,6 +5137,25 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_str_copy_bug1(self): + ops = """ + [i0] + p1 = newstr(1) + strsetitem(p1, 0, i0) + p2 = newstr(1) + escape(p2) + copystrcontent(p1, p2, 0, 0, 1) + finish() + """ + expected = """ + [i0] + p2 = newstr(1) + escape(p2) + strsetitem(p2, 0, i0) + finish() + """ + self.optimize_strunicode_loop(ops, expected) + def test_call_pure_vstring_const(self): py.test.skip("implement me") ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -154,6 +154,7 @@ return self._chars[index] # may return None! 
def setitem(self, index, charvalue): + assert self.is_virtual() assert isinstance(charvalue, optimizer.OptValue) assert self._chars[index] is None, ( "setitem() on an already-initialized location") @@ -524,7 +525,7 @@ actual_length = length.force_box(self).getint() for index in range(actual_length): vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode) - if isinstance(dst, VStringPlainValue): + if isinstance(dst, VStringPlainValue) and dst.is_virtual(): dst.setitem(index + dst_start, vresult) else: op = ResOperation(mode.STRSETITEM, [ From noreply at buildbot.pypy.org Wed Nov 6 11:11:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 6 Nov 2013 11:11:16 +0100 (CET) Subject: [pypy-commit] pypy default: Potential fix, or at least cleaning up Message-ID: <20131106101116.EF8FD1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67864:8ae4fae9fcc7 Date: 2013-11-06 11:10 +0100 http://bitbucket.org/pypy/pypy/changeset/8ae4fae9fcc7/ Log: Potential fix, or at least cleaning up diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -513,19 +513,20 @@ srcstart = self.getvalue(op.getarg(2)) dststart = self.getvalue(op.getarg(3)) length = self.getvalue(op.getarg(4)) + dst_virtual = (isinstance(dst, VStringPlainValue) and dst.is_virtual()) if length.is_constant() and length.box.getint() == 0: return elif ((src.is_virtual() or src.is_constant()) and srcstart.is_constant() and dststart.is_constant() and length.is_constant() and - (length.force_box(self).getint() < 20 or (src.is_virtual() and dst.is_virtual()))): + (length.force_box(self).getint() < 20 or (src.is_virtual() and dst_virtual))): src_start = srcstart.force_box(self).getint() dst_start = dststart.force_box(self).getint() actual_length = length.force_box(self).getint() for index in 
range(actual_length): vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode) - if isinstance(dst, VStringPlainValue) and dst.is_virtual(): + if dst_virtual: dst.setitem(index + dst_start, vresult) else: op = ResOperation(mode.STRSETITEM, [ From noreply at buildbot.pypy.org Wed Nov 6 13:22:52 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 6 Nov 2013 13:22:52 +0100 (CET) Subject: [pypy-commit] stmgc default: allow nesting of stm_stop_all_other_threads()-stm_partial_commit_and_resume_other_threads() pairs Message-ID: <20131106122252.3127D1C0225@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r545:28f66a5356cd Date: 2013-11-06 13:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/28f66a5356cd/ Log: allow nesting of stm_stop_all_other_threads()-stm_partial_commit_and _resume_other_threads() pairs diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -319,17 +319,21 @@ } +static int single_thread_nesting = 0; void stm_stop_all_other_threads(void) { /* push gc roots! */ struct tx_descriptor *d; BecomeInevitable("stop_all_other_threads"); - stm_start_single_thread(); - - for (d = stm_tx_head; d; d = d->tx_next) { - if (*d->active_ref == 1) // && d != thread_descriptor) <- TRUE - AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS); + if (!single_thread_nesting) { + stm_start_single_thread(); + + for (d = stm_tx_head; d; d = d->tx_next) { + if (*d->active_ref == 1) // && d != thread_descriptor) <- TRUE + AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS); + } } + single_thread_nesting++; } @@ -339,20 +343,23 @@ assert(stm_active == 2); int atomic = d->atomic; - /* Give up atomicity during commit. This still works because - we keep the inevitable status, thereby being guaranteed to - commit before all others. */ - stm_atomic(-atomic); - - /* Commit and start new inevitable transaction while never - giving up the inevitable status. */ - CommitTransaction(1); /* 1=stay_inevitable! 
*/ - BeginInevitableTransaction(1); - - /* restore atomic-count */ - stm_atomic(atomic); - - stm_stop_single_thread(); + single_thread_nesting--; + if (single_thread_nesting == 0) { + /* Give up atomicity during commit. This still works because + we keep the inevitable status, thereby being guaranteed to + commit before all others. */ + stm_atomic(-atomic); + + /* Commit and start new inevitable transaction while never + giving up the inevitable status. */ + CommitTransaction(1); /* 1=stay_inevitable! */ + BeginInevitableTransaction(1); + + /* restore atomic-count */ + stm_atomic(atomic); + + stm_stop_single_thread(); + } } void stm_start_single_thread(void) From noreply at buildbot.pypy.org Wed Nov 6 18:24:57 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 6 Nov 2013 18:24:57 +0100 (CET) Subject: [pypy-commit] pypy default: (alex, arigato) ahhh, fix a bug with unrolling copystrcontent where the dst is not virtual Message-ID: <20131106172457.33BC31C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67865:03c7f7be80bb Date: 2013-11-06 09:24 -0800 http://bitbucket.org/pypy/pypy/changeset/03c7f7be80bb/ Log: (alex, arigato) ahhh, fix a bug with unrolling copystrcontent where the dst is not virtual diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5125,14 +5125,16 @@ def test_str_copy_virtual_src_concrete_dst(self): ops = """ [p0] - p1 = newstr(1) + p1 = newstr(2) strsetitem(p1, 0, 101) - copystrcontent(p1, p0, 0, 0, 1) + strsetitem(p1, 1, 102) + copystrcontent(p1, p0, 0, 0, 2) finish(p0) """ expected = """ [p0] strsetitem(p0, 0, 101) + strsetitem(p0, 1, 102) finish(p0) """ self.optimize_strunicode_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py 
b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -529,12 +529,12 @@ if dst_virtual: dst.setitem(index + dst_start, vresult) else: - op = ResOperation(mode.STRSETITEM, [ + new_op = ResOperation(mode.STRSETITEM, [ op.getarg(1), ConstInt(index + dst_start), vresult.force_box(self), ], None) - self.emit_operation(op) + self.emit_operation(new_op) else: copy_str_content(self, src.force_box(self), From noreply at buildbot.pypy.org Wed Nov 6 18:41:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 6 Nov 2013 18:41:09 +0100 (CET) Subject: [pypy-commit] pypy default: Stylish fix, doesn't seem to change anything Message-ID: <20131106174109.9D41B1C01F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67866:08b6eb67086a Date: 2013-11-06 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/08b6eb67086a/ Log: Stylish fix, doesn't seem to change anything diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -530,7 +530,7 @@ dst.setitem(index + dst_start, vresult) else: new_op = ResOperation(mode.STRSETITEM, [ - op.getarg(1), + dst.force_box(self), ConstInt(index + dst_start), vresult.force_box(self), ], None) From noreply at buildbot.pypy.org Wed Nov 6 19:49:47 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 6 Nov 2013 19:49:47 +0100 (CET) Subject: [pypy-commit] stmgc default: more asserts and a just-to-be-sure change that affects performance a bit Message-ID: <20131106184947.C71491C01F6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r546:14ac008e70a5 Date: 2013-11-06 19:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/14ac008e70a5/ Log: more asserts and a just-to-be-sure change that affects performance a bit diff --git a/c4/stmsync.c b/c4/stmsync.c --- 
a/c4/stmsync.c +++ b/c4/stmsync.c @@ -77,11 +77,17 @@ d->max_aborts = max_aborts; } +static void start_exclusivelock(void); +static void stop_exclusivelock(void); int stm_enter_callback_call(void) { int token = (thread_descriptor == NULL); dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { + /* acquire exclusive lock. Just to be safe... + XXX: remove again when sure it is not needed + (interaction with stop_all_other_threads()) */ + start_exclusivelock(); stmgcpage_acquire_global_lock(); #ifdef STM_BARRIER_COUNT static int seen = 0; @@ -94,6 +100,7 @@ stmgc_init_nursery(); init_shadowstack(); stmgcpage_release_global_lock(); + stop_exclusivelock(); } BeginInevitableTransaction(0); return token; @@ -108,11 +115,13 @@ CommitTransaction(0); if (token == 1) { + start_exclusivelock(); stmgcpage_acquire_global_lock(); done_shadowstack(); stmgc_done_nursery(); DescriptorDone(); stmgcpage_release_global_lock(); + stop_exclusivelock(); } } @@ -292,6 +301,7 @@ void stm_stop_sharedlock(void) { + assert(in_single_thread == NULL); dprintf(("stm_stop_sharedlock\n")); //assert(stmgc_nursery_hiding(thread_descriptor, 1)); int err = pthread_rwlock_unlock(&rwlock_shared); @@ -311,6 +321,7 @@ static void stop_exclusivelock(void) { + assert(in_single_thread == NULL); dprintf(("stop_exclusivelock\n")); int err = pthread_rwlock_unlock(&rwlock_shared); if (err != 0) @@ -326,6 +337,7 @@ BecomeInevitable("stop_all_other_threads"); if (!single_thread_nesting) { + assert(in_single_thread == NULL); stm_start_single_thread(); for (d = stm_tx_head; d; d = d->tx_next) { From noreply at buildbot.pypy.org Wed Nov 6 19:54:02 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 6 Nov 2013 19:54:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: import stmgc that allows nesting of stop_all_other_threads()-resume_.. 
Message-ID: <20131106185402.125031C01F6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67867:e1fc1f6c0821 Date: 2013-11-06 19:51 +0100 http://bitbucket.org/pypy/pypy/changeset/e1fc1f6c0821/ Log: import stmgc that allows nesting of stop_all_other_threads()-resume_.. diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -8a3b7748ba7f +14ac008e70a5 diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -78,11 +78,17 @@ d->max_aborts = max_aborts; } +static void start_exclusivelock(void); +static void stop_exclusivelock(void); int stm_enter_callback_call(void) { int token = (thread_descriptor == NULL); dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { + /* acquire exclusive lock. Just to be safe... 
+ XXX: remove again when sure it is not needed + (interaction with stop_all_other_threads()) */ + start_exclusivelock(); stmgcpage_acquire_global_lock(); #ifdef STM_BARRIER_COUNT static int seen = 0; @@ -95,6 +101,7 @@ stmgc_init_nursery(); init_shadowstack(); stmgcpage_release_global_lock(); + stop_exclusivelock(); } BeginInevitableTransaction(0); return token; @@ -109,11 +116,13 @@ CommitTransaction(0); if (token == 1) { + start_exclusivelock(); stmgcpage_acquire_global_lock(); done_shadowstack(); stmgc_done_nursery(); DescriptorDone(); stmgcpage_release_global_lock(); + stop_exclusivelock(); } } @@ -293,6 +302,7 @@ void stm_stop_sharedlock(void) { + assert(in_single_thread == NULL); dprintf(("stm_stop_sharedlock\n")); //assert(stmgc_nursery_hiding(thread_descriptor, 1)); int err = pthread_rwlock_unlock(&rwlock_shared); @@ -312,6 +322,7 @@ static void stop_exclusivelock(void) { + assert(in_single_thread == NULL); dprintf(("stop_exclusivelock\n")); int err = pthread_rwlock_unlock(&rwlock_shared); if (err != 0) @@ -320,17 +331,22 @@ } +static int single_thread_nesting = 0; void stm_stop_all_other_threads(void) { /* push gc roots! */ struct tx_descriptor *d; BecomeInevitable("stop_all_other_threads"); - stm_start_single_thread(); - - for (d = stm_tx_head; d; d = d->tx_next) { - if (*d->active_ref == 1) // && d != thread_descriptor) <- TRUE - AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS); + if (!single_thread_nesting) { + assert(in_single_thread == NULL); + stm_start_single_thread(); + + for (d = stm_tx_head; d; d = d->tx_next) { + if (*d->active_ref == 1) // && d != thread_descriptor) <- TRUE + AbortTransactionAfterCollect(d, ABRT_OTHER_THREADS); + } } + single_thread_nesting++; } @@ -340,20 +356,23 @@ assert(stm_active == 2); int atomic = d->atomic; - /* Give up atomicity during commit. This still works because - we keep the inevitable status, thereby being guaranteed to - commit before all others. 
*/ - stm_atomic(-atomic); - - /* Commit and start new inevitable transaction while never - giving up the inevitable status. */ - CommitTransaction(1); /* 1=stay_inevitable! */ - BeginInevitableTransaction(1); - - /* restore atomic-count */ - stm_atomic(atomic); - - stm_stop_single_thread(); + single_thread_nesting--; + if (single_thread_nesting == 0) { + /* Give up atomicity during commit. This still works because + we keep the inevitable status, thereby being guaranteed to + commit before all others. */ + stm_atomic(-atomic); + + /* Commit and start new inevitable transaction while never + giving up the inevitable status. */ + CommitTransaction(1); /* 1=stay_inevitable! */ + BeginInevitableTransaction(1); + + /* restore atomic-count */ + stm_atomic(atomic); + + stm_stop_single_thread(); + } } void stm_start_single_thread(void) From noreply at buildbot.pypy.org Wed Nov 6 19:54:03 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 6 Nov 2013 19:54:03 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: Add some stop_all_other_threads sections to compile.py. May be completely Message-ID: <20131106185403.5755B1C01F6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67868:002a6ee25259 Date: 2013-11-06 19:53 +0100 http://bitbucket.org/pypy/pypy/changeset/002a6ee25259/ Log: Add some stop_all_other_threads sections to compile.py. May be completely unnecessary... 
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -16,6 +16,7 @@ from rpython.jit.metainterp.inliner import Inliner from rpython.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP, ResumeDataDirectReader from rpython.jit.codewriter import heaptracker, longlong +from rpython.rlib import rstm def giveup(): @@ -248,12 +249,17 @@ for box in loop.inputargs: assert isinstance(box, Box) + if rgc.stm_is_enabled(): + rstm.stop_all_other_threads() target_token = loop.operations[-1].getdescr() resumekey.compile_and_attach(metainterp, loop) target_token = label.getdescr() assert isinstance(target_token, TargetToken) record_loop_or_bridge(metainterp_sd, loop) + if rgc.stm_is_enabled(): + rstm.partial_commit_and_resume_other_threads() + return target_token def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): @@ -899,9 +905,13 @@ if new_trace.operations[-1].getopnum() != rop.LABEL: # We managed to create a bridge. 
Dispatch to resumekey to # know exactly what we must do (ResumeGuardDescr/ResumeFromInterpDescr) + if rgc.stm_is_enabled(): + rstm.stop_all_other_threads() target_token = new_trace.operations[-1].getdescr() resumekey.compile_and_attach(metainterp, new_trace) record_loop_or_bridge(metainterp_sd, new_trace) + if rgc.stm_is_enabled(): + rstm.partial_commit_and_resume_other_threads() return target_token else: metainterp.retrace_needed(new_trace) From noreply at buildbot.pypy.org Wed Nov 6 21:53:46 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 6 Nov 2013 21:53:46 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: handle directory listings Message-ID: <20131106205346.4236A1D2304@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r878:6c6f461b0392 Date: 2013-11-06 22:53 +0200 http://bitbucket.org/pypy/buildbot/changeset/6c6f461b0392/ Log: handle directory listings diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -5,7 +5,7 @@ import cgi import urllib import sys -from twisted.web.static import File +from twisted.web.static import File, formatFileSize from buildbot.status.web.base import DirectoryLister class PyPyTarball(object): @@ -165,6 +165,15 @@ for f, rowClass in zip(files, rowClasses): f["class"] = rowClass self._add_test_results(f, rowClass) + for d in dirs: + dirname = urllib.unquote(d['href']) + dd = py.path.local(self.path).join(dirname) + date = datetime.date.fromtimestamp(dd.mtime()) + d['date'] = date.isoformat() + # Assume dir is non-recursive + size = sum([f.size() for f in dd.listdir() if f.isfile()]) + d['size'] = formatFileSize(size) + return dirs, files def _add_test_results(self, element, rowClass): diff --git a/master/templates/directory.html b/master/templates/directory.html --- a/master/templates/directory.html +++ b/master/templates/directory.html @@ -40,6 +40,7 @@ +{% if 
files|length > 1 %} @@ -47,13 +48,23 @@ +{% else %} + + + + + + + +{% endif %} {% for d in directories %} - - - + + + + {% endfor %} From noreply at buildbot.pypy.org Thu Nov 7 09:51:22 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 7 Nov 2013 09:51:22 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: for buildbot: make --stm a default option Message-ID: <20131107085122.5070E1C1051@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67869:9e595d44ada8 Date: 2013-11-07 09:49 +0100 http://bitbucket.org/pypy/pypy/changeset/9e595d44ada8/ Log: for buildbot: make --stm a default option diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -99,7 +99,7 @@ BoolOption("thread", "enable use of threading primitives", default=False, cmdline="--thread"), BoolOption("stm", "enable use of Software Transactional Memory", - default=False, cmdline="--stm", + default=True, cmdline="--stm", suggests=[("translation.gc", "stmgc")], # Boehm works too requires=[("translation.thread", True), ("translation.continuation", False), # XXX for now From noreply at buildbot.pypy.org Thu Nov 7 09:56:35 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 7 Nov 2013 09:56:35 +0100 (CET) Subject: [pypy-commit] pypy default: two more action items Message-ID: <20131107085635.3F9CD1C1051@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r67870:0d72a061fa28 Date: 2013-11-07 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0d72a061fa28/ Log: two more action items diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -47,3 +47,7 @@ * post announcement on morepypy.blogspot.com * send announcements to pypy-dev, python-list, python-announce, python-dev ... 
+ +* add a tag on jitviewer that corresponds to pypy release +* add a tag on codespeed that corresponds to pypy release + From noreply at buildbot.pypy.org Thu Nov 7 10:55:50 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 7 Nov 2013 10:55:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: for buildbot: last commit wasn't enough, next try by making -Ojit imply --stm Message-ID: <20131107095550.64A2F1C13F0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67871:47618ab473d1 Date: 2013-11-07 10:51 +0100 http://bitbucket.org/pypy/pypy/changeset/47618ab473d1/ Log: for buildbot: last commit wasn't enough, next try by making -Ojit imply --stm diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -99,7 +99,7 @@ BoolOption("thread", "enable use of threading primitives", default=False, cmdline="--thread"), BoolOption("stm", "enable use of Software Transactional Memory", - default=True, cmdline="--stm", + default=False, cmdline="--stm", suggests=[("translation.gc", "stmgc")], # Boehm works too requires=[("translation.thread", True), ("translation.continuation", False), # XXX for now @@ -325,7 +325,7 @@ 'mem': DEFL_GC + ' lowinline remove_asserts removetypeptr', '2': DEFL_GC + ' extraopts', '3': DEFL_GC + ' extraopts remove_asserts', - 'jit': DEFL_GC + ' extraopts jit', + 'jit': 'stmgc extraopts jit stm', } def set_opt_level(config, level): @@ -363,6 +363,8 @@ config.translation.suggest(jit=True) elif word == 'removetypeptr': config.translation.suggest(gcremovetypeptr=True) + elif word == 'stm': + config.translation.suggest(stm=True) else: raise ValueError(word) From noreply at buildbot.pypy.org Thu Nov 7 12:08:37 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 7 Nov 2013 12:08:37 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: disable micronumpy (delay debugging this issue) Message-ID: 
<20131107110837.07F751C13F0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67872:28e078327c8f Date: 2013-11-07 11:55 +0100 http://bitbucket.org/pypy/pypy/changeset/28e078327c8f/ Log: disable micronumpy (delay debugging this issue) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,8 +34,9 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "_ffi", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] + # stm disabled "micronumpy", caused translation problems once.. )) translation_modules = default_modules.copy() From noreply at buildbot.pypy.org Thu Nov 7 12:08:38 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 7 Nov 2013 12:08:38 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: Disable use of immutable read barriers in most cases. Since there can be a Message-ID: <20131107110838.663321C13FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67873:5f1727595de0 Date: 2013-11-07 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/5f1727595de0/ Log: Disable use of immutable read barriers in most cases. Since there can be a transaction break between allocation and initialization of the immutable field... 
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -271,18 +271,19 @@ opnum = op.getopnum() descr = op.getdescr() target_category = 'R' - if opnum == rop.GETFIELD_GC: - assert isinstance(descr, FieldDescr) - if descr.is_immutable(): - target_category = 'I' - elif opnum == rop.GETINTERIORFIELD_GC: - assert isinstance(descr, InteriorFieldDescr) - if descr.is_immutable(): - target_category = 'I' - elif opnum == rop.GETARRAYITEM_GC: - assert isinstance(descr, ArrayDescr) - if descr.is_immutable(): - target_category = 'I' + # XXX: review: + # if opnum == rop.GETFIELD_GC: + # assert isinstance(descr, FieldDescr) + # if descr.is_immutable(): + # target_category = 'I' + # elif opnum == rop.GETINTERIORFIELD_GC: + # assert isinstance(descr, InteriorFieldDescr) + # if descr.is_immutable(): + # target_category = 'I' + # elif opnum == rop.GETARRAYITEM_GC: + # assert isinstance(descr, ArrayDescr) + # if descr.is_immutable(): + # target_category = 'I' self.handle_category_operations(op, target_category) diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -85,8 +85,8 @@ # field even on a stub pass - elif (op.opname in ('getarraysize', 'getinteriorarraysize') - or (is_getter and is_immutable(op))): + elif op.opname in ('getarraysize', 'getinteriorarraysize'): + # XXX: or (is_getter and is_immutable(op))): # we can't leave getarraysize or the immutable getfields # fully unmodified: we need at least immut_read_barrier # to detect stubs. 
From noreply at buildbot.pypy.org Thu Nov 7 12:11:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Nov 2013 12:11:21 +0100 (CET) Subject: [pypy-commit] pypy default: Remove "from typeobject import x" from typeobject.py itself Message-ID: <20131107111121.395B61C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67874:84a635eb05a7 Date: 2013-11-07 12:10 +0100 http://bitbucket.org/pypy/pypy/changeset/84a635eb05a7/ Log: Remove "from typeobject import x" from typeobject.py itself (it comes from the merge of typetype.py into this file) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -568,8 +568,6 @@ def _create_new_type(space, w_typetype, w_name, w_bases, w_dict): # this is in its own function because we want the special case 'type(x)' # above to be seen by the jit. - from pypy.objspace.std.typeobject import W_TypeObject - if w_bases is None or w_dict is None: raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments")) @@ -611,7 +609,6 @@ return w_type def _precheck_for_new(space, w_type): - from pypy.objspace.std.typeobject import W_TypeObject if not isinstance(w_type, W_TypeObject): raise operationerrfmt(space.w_TypeError, "X is not a type object (%T)", w_type) @@ -620,7 +617,6 @@ # ____________________________________________________________ def _check(space, w_type, w_msg=None): - from pypy.objspace.std.typeobject import W_TypeObject if not isinstance(w_type, W_TypeObject): if w_msg is None: w_msg = space.wrap("descriptor is for 'type'") @@ -653,7 +649,6 @@ return space.newtuple(w_type.bases_w) def mro_subclasses(space, w_type, temp): - from pypy.objspace.std.typeobject import W_TypeObject, compute_mro temp.append((w_type, w_type.mro_w)) compute_mro(w_type) for w_sc in w_type.get_subclasses(): @@ -662,9 +657,6 @@ def descr_set__bases__(space, w_type, w_value): # this assumes all app-level type 
objects are W_TypeObject - from pypy.objspace.std.typeobject import (W_TypeObject, get_parent_layout, - check_and_find_best_base, is_mro_purely_of_types) - w_type = _check(space, w_type) if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, @@ -728,7 +720,6 @@ assert w_type.w_same_layout_as is get_parent_layout(w_type) # invariant def descr__base(space, w_type): - from pypy.objspace.std.typeobject import find_best_base w_type = _check(space, w_type) return find_best_base(space, w_type.bases_w) From noreply at buildbot.pypy.org Thu Nov 7 19:15:27 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 7 Nov 2013 19:15:27 +0100 (CET) Subject: [pypy-commit] pypy default: Speed up converting strings/unicode to cdata in cffi by using memcpy Message-ID: <20131107181527.E61711C01F3@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67875:47990fdc93d8 Date: 2013-11-07 10:14 -0800 http://bitbucket.org/pypy/pypy/changeset/47990fdc93d8/ Log: Speed up converting strings/unicode to cdata in cffi by using memcpy diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -2,13 +2,14 @@ Pointers. 
""" -from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror - from rpython.rlib import rposix from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.annlowlevel import llstr, llunicode from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw, copy_unicode_to_raw +from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror from pypy.module._cffi_backend import cdataobj, misc, ctypeprim, ctypevoid from pypy.module._cffi_backend.ctypeobj import W_CType @@ -90,8 +91,7 @@ "initializer string is too long for '%s'" " (got %d characters)", self.name, n) - for i in range(n): - cdata[i] = s[i] + copy_string_to_raw(llstr(s), cdata, 0, n) if n != self.length: cdata[n] = '\x00' elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar): @@ -105,8 +105,7 @@ " (got %d characters)", self.name, n) unichardata = rffi.cast(rffi.CWCHARP, cdata) - for i in range(n): - unichardata[i] = s[i] + copy_unicode_to_raw(llunicode(s), unichardata, 0, n) if n != self.length: unichardata[n] = u'\x00' else: @@ -157,7 +156,6 @@ return cdataobj.W_CData(self.space, ptrdata, self) def convert_from_object(self, cdata, w_ob): - space = self.space if not isinstance(w_ob, cdataobj.W_CData): raise self._convert_error("cdata pointer", w_ob) other = w_ob.ctype @@ -298,7 +296,6 @@ def convert_argument_from_object(self, cdata, w_ob): from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag - space = self.space result = (not isinstance(w_ob, cdataobj.W_CData) and self._prepare_pointer_call_argument(w_ob, cdata)) if result == 0: From noreply at buildbot.pypy.org Thu Nov 7 23:14:26 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Thu, 7 Nov 2013 23:14:26 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Provide fast paths in find for integer and float strategy lists. 
Message-ID: <20131107221426.A190A1C0175@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67876:599ed4285a6d Date: 2013-11-07 21:56 +0000 http://bitbucket.org/pypy/pypy/changeset/599ed4285a6d/ Log: Provide fast paths in find for integer and float strategy lists. This patch affects "x in l" and "l.index(x)" where l is a list. It leaves the expected common path (searching for an integer in an integer list; for a float in a float list) unchanged. However, comparisons of other types are significantly sped up. In some cases, we can use the type of an object to immediately prove that it can't be in the list (e.g. a user object which doesn't override __eq__ etc. can't possibly be in an integer or float list) and return immediately; in others (e.g. when searching for a float in an integer list), we can convert the input type into a primitive that allows significantly faster comparisons. As rough examples, searching for a float in an integer list is approximately 3x faster; for a long in an integer list approximately 10x faster; searching for a string in an integer list returns immediately, no matter the size of the list. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -19,6 +19,7 @@ from pypy.objspace.std import slicetype from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.iterobject import (W_FastListIterObject, W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice @@ -1537,6 +1538,47 @@ def getitems_int(self, w_list): return self.unerase(w_list.lstorage) + _orig_find = find + def find(self, w_list, w_obj, start, stop): + # Find an element in this integer list. 
For integers, floats, and longs, + # we can use primitive comparisons (possibly after a conversion to an + # int). For other user types (strings and user objects which don't play + # funny tricks with __eq__ etc.) we can prove immediately that an object + # could not be in the list and return. + # + # Note: although it might seem we want to do the clever tricks first, + # we expect that the common case is searching for an integer in an + # integer list. The clauses of this if are thus ordered in likely order + # of frequency of use. + + w_objt = type(w_obj) + if w_objt is W_IntObject: + return self._safe_find(w_list, self.unwrap(w_obj), start, stop) + elif w_objt is W_FloatObject or w_objt is W_LongObject: + if w_objt is W_FloatObject: + # Asking for an int from a W_FloatObject can return either a + # W_IntObject or W_LongObject, so we then need to disambiguate + # between the two. + w_obj = self.space.int(w_obj) + w_objt = type(w_obj) + + if w_objt is W_IntObject: + intv = self.unwrap(w_obj) + else: + assert w_objt is W_LongObject + try: + intv = w_obj.toint() + except OverflowError: + # Longs which overflow can't possibly be found in an integer + # list. 
+ raise ValueError + return self._safe_find(w_list, intv, start, stop) + elif w_objt is W_StringObject or w_objt is W_UnicodeObject: + raise ValueError + elif self.space.type(w_obj).compares_by_identity(): + raise ValueError + return self._orig_find(w_list, w_obj, start, stop) + _base_extend_from_list = _extend_from_list @@ -1581,6 +1623,19 @@ def list_is_correct_type(self, w_list): return w_list.strategy is self.space.fromcache(FloatListStrategy) + _orig_find = find + def find(self, w_list, w_obj, start, stop): + w_objt = type(w_obj) + if w_objt is W_FloatObject: + return self._safe_find(w_list, self.unwrap(w_obj), start, stop) + elif w_objt is W_IntObject or w_objt is W_LongObject: + return self._safe_find(w_list, w_obj.float_w(self.space), start, stop) + elif w_objt is W_StringObject or w_objt is W_UnicodeObject: + raise ValueError + elif self.space.type(w_obj).compares_by_identity(): + raise ValueError + return self._orig_find(w_list, w_obj, start, stop) + def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) sorter = FloatSort(l, len(l)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -457,6 +457,39 @@ assert l.__contains__(2) assert not l.__contains__("2") assert l.__contains__(1.0) + assert l.__contains__(1.1) + assert l.__contains__(1.9) + assert l.__contains__(1L) + assert not l.__contains__(object()) + assert not l.__contains__(object()) + class t(object): + def __eq__(self, o): + if o == 2: + return True + return False + assert l.__contains__(t()) + assert not [1,3].__contains__(t()) + assert "1" not in l + + l = [1.0,2.0,3.0] + assert l.__contains__(2.0) + assert l.__contains__(2) + assert not l.__contains__(4) + assert not l.__contains__("2") + assert l.__contains__(1.0) + assert not l.__contains__(1.1) + assert l.__contains__(1L) + assert not l.__contains__(4.0) + assert not 
l.__contains__(object()) + assert l.__contains__(t()) + assert not [1.0,3.0].__contains__(t()) + assert "1" not in l + + import sys + l = [sys.maxint] + assert l.__contains__(sys.maxint) + assert not l.__contains__(sys.maxint + 1) + assert not l.__contains__(sys.maxint * 1.0) l = ["1","2","3"] assert l.__contains__("2") From noreply at buildbot.pypy.org Fri Nov 8 00:47:04 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Fri, 8 Nov 2013 00:47:04 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Treat floats in integer lists more carefully. Message-ID: <20131107234704.8EB661C00F8@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67877:b18acdb9aaf4 Date: 2013-11-07 23:11 +0000 http://bitbucket.org/pypy/pypy/changeset/b18acdb9aaf4/ Log: Treat floats in integer lists more carefully. Previously floats were all rounded off, which leads to incorrect semantics for any float with a fractional component. A float which, when converted to an integer, doesn't compare True to itself can never match against any integer, so simply bail out when such a float is encountered. Bug pointed out to Amaury Forgeot d'Arc. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1556,10 +1556,17 @@ return self._safe_find(w_list, self.unwrap(w_obj), start, stop) elif w_objt is W_FloatObject or w_objt is W_LongObject: if w_objt is W_FloatObject: + # Floats with a fractional part can never compare True with + # respect to an integer, so we convert the float to an int and + # see if it compares True to itself or not. If it doesn't, we + # can immediately bail out. + w_objn = self.space.int(w_obj) + if not self.space.eq_w(w_obj, w_objn): + raise ValueError + w_obj = w_objn # Asking for an int from a W_FloatObject can return either a # W_IntObject or W_LongObject, so we then need to disambiguate # between the two. 
- w_obj = self.space.int(w_obj) w_objt = type(w_obj) if w_objt is W_IntObject: diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -457,8 +457,7 @@ assert l.__contains__(2) assert not l.__contains__("2") assert l.__contains__(1.0) - assert l.__contains__(1.1) - assert l.__contains__(1.9) + assert not l.__contains__(1.1) assert l.__contains__(1L) assert not l.__contains__(object()) assert not l.__contains__(object()) From noreply at buildbot.pypy.org Fri Nov 8 01:24:15 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 8 Nov 2013 01:24:15 +0100 (CET) Subject: [pypy-commit] pypy default: fix segfault when calling argsort on flexible arrays Message-ID: <20131108002415.A1C1A1C322F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67878:b5b32411e744 Date: 2013-11-07 18:11 -0500 http://bitbucket.org/pypy/pypy/changeset/b5b32411e744/ Log: fix segfault when calling argsort on flexible arrays diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -519,10 +519,13 @@ # by converting nonnative byte order. 
if self.is_scalar(): return space.wrap(0) - s = self.get_dtype().name - if not self.get_dtype().is_native(): - s = s[1:] - dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] + if not self.get_dtype().is_flexible_type(): + s = self.get_dtype().name + if not self.get_dtype().is_native(): + s = s[1:] + dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] + else: + dtype = self.get_dtype() contig = self.implementation.astype(space, dtype) return contig.argsort(space, w_axis) diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -12,8 +12,7 @@ exp = sorted(range(len(exp)), key=exp.__getitem__) c = a.copy() res = a.argsort() - assert (res == exp).all(), \ - 'a,res,dtype %r,%r,%r' % (a,res,dtype) + assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) assert (a == c).all() # not modified a = arange(100, dtype=dtype) @@ -60,11 +59,10 @@ for dtype in ['int', 'float', 'int16', 'float32', 'uint64', 'i2', complex]: a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype) - b = sorted(list(a)) - c = a.copy() - a.sort() - assert (a == b).all(), \ - 'a,orig,dtype %r,%r,%r' % (a,c,dtype) + exp = sorted(list(a)) + res = a.copy() + res.sort() + assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) a = arange(100, dtype=dtype) c = a.copy() @@ -85,7 +83,6 @@ #assert (a == b).all(), \ # 'a,orig,dtype %r,%r,%r' % (a,c,dtype) - # tests from numpy/tests/test_multiarray.py def test_sort_corner_cases(self): # test ordering for floats and complex containing nans. 
It is only @@ -307,7 +304,6 @@ assert (r == array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)).all() - # tests from numpy/tests/test_regression.py def test_sort_bigendian(self): skip('not implemented yet') @@ -325,3 +321,13 @@ y = fromstring("\x00\x01\x00\x02", dtype="S2") x.sort(kind='q') assert (x == y).all() + + def test_string_mergesort(self): + import numpypy as np + import sys + x = np.array(['a'] * 32) + if '__pypy__' in sys.builtin_module_names: + exc = raises(NotImplementedError, "x.argsort(kind='m')") + assert 'non-numeric types' in exc.value.message + else: + assert (x.argsort(kind='m') == np.arange(32)).all() From noreply at buildbot.pypy.org Fri Nov 8 02:09:59 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 8 Nov 2013 02:09:59 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: fix newbyteorder() not changing the itemtype Message-ID: <20131108010959.3784B1C325C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r67880:f0af04bbb177 Date: 2013-11-07 19:33 -0500 http://bitbucket.org/pypy/pypy/changeset/f0af04bbb177/ Log: fix newbyteorder() not changing the itemtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -313,7 +313,8 @@ endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE elif newendian != NPY_IGNORE: endian = newendian - return W_Dtype(self.itemtype, self.num, self.kind, self.name, self.char, self.w_box_type, endian) + itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) + return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, self.w_box_type, endian) def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ 
b/pypy/module/micronumpy/test/test_dtypes.py @@ -304,6 +304,13 @@ exc = raises(ValueError, dt.newbyteorder, 'XX') assert exc.value[0] == 'XX is an unrecognized byteorder' + for t in [np.int_, np.float_]: + dt1 = np.dtype(t) + dt2 = dt1.newbyteorder() + s1 = np.array(123, dtype=dt1).tostring() + s2 = np.array(123, dtype=dt2).byteswap().tostring() + assert s1 == s2 + class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): import numpypy as numpy From noreply at buildbot.pypy.org Fri Nov 8 02:09:58 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 8 Nov 2013 02:09:58 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: provide newbyteorder() for numpy dtypes, test Message-ID: <20131108010958.1928E1C3258@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r67879:dd6eb6413320 Date: 2013-11-07 19:17 -0500 http://bitbucket.org/pypy/pypy/changeset/dd6eb6413320/ Log: provide newbyteorder() for numpy dtypes, test diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -1,6 +1,27 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.constants import * + +def byteorder_converter(space, new_order): + endian = new_order[0] + if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP): + ch = endian + if ch in ('b', 'B'): + endian = NPY_BIG + elif ch in ('l', 'L'): + endian = NPY_LITTLE + elif ch in ('n', 'N'): + endian = NPY_NATIVE + elif ch in ('i', 'I'): + endian = NPY_IGNORE + elif ch in ('s', 'S'): + endian = NPY_SWAP + else: + raise OperationError(space.w_ValueError, space.wrap( + "%s is an unrecognized byteorder" % new_order)) + return endian + + def clipmode_converter(space, w_mode): if space.is_none(w_mode): return NPY_RAISE @@ -19,6 +40,7 @@ raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) 
+ def order_converter(space, w_order, default): if space.is_none(w_order): return default diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit +from pypy.module.micronumpy.conversion_utils import byteorder_converter from pypy.module.micronumpy.constants import * @@ -303,6 +304,17 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + newendian = byteorder_converter(space, new_order) + endian = self.byteorder + if endian != NPY_IGNORE: + if newendian == NPY_SWAP: + endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE + elif newendian != NPY_IGNORE: + endian = newendian + return W_Dtype(self.itemtype, self.num, self.kind, self.name, self.char, self.w_box_type, endian) + def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) fields = {} @@ -400,6 +412,7 @@ __reduce__ = interp2app(W_Dtype.descr_reduce), __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -268,6 +268,42 @@ assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype + def test_newbyteorder(self): + import numpypy as np + import sys + sys_is_le = sys.byteorder == 'little' + native_code = sys_is_le and '<' or '>' + swapped_code = sys_is_le and '>' or '<' + 
native_dt = np.dtype(native_code+'i2') + swapped_dt = np.dtype(swapped_code+'i2') + assert native_dt.newbyteorder('S') == swapped_dt + assert native_dt.newbyteorder() == swapped_dt + assert native_dt == swapped_dt.newbyteorder('S') + assert native_dt == swapped_dt.newbyteorder('=') + assert native_dt == swapped_dt.newbyteorder('N') + assert native_dt == native_dt.newbyteorder('|') + assert np.dtype('i2') == native_dt.newbyteorder('>') + assert np.dtype('>i2') == native_dt.newbyteorder('B') + + for t in [np.int_, np.float_]: + dt = np.dtype(t) + dt1 = dt.newbyteorder().newbyteorder() + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + assert dt.byteorder != dt1.byteorder + #assert hash(dt) == hash(dt1) + if dt == dt2: + assert dt.byteorder != dt2.byteorder + #assert hash(dt) == hash(dt2) + else: + assert dt.byteorder != dt3.byteorder + #assert hash(dt) == hash(dt3) + + exc = raises(ValueError, dt.newbyteorder, 'XX') + assert exc.value[0] == 'XX is an unrecognized byteorder' + class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): import numpypy as numpy From noreply at buildbot.pypy.org Fri Nov 8 11:35:59 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Fri, 8 Nov 2013 11:35:59 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Remove unnecessary double type-check. Message-ID: <20131108103559.58E8D1C1402@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67881:1f36c73c569a Date: 2013-11-08 10:29 +0000 http://bitbucket.org/pypy/pypy/changeset/1f36c73c569a/ Log: Remove unnecessary double type-check. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1538,7 +1538,6 @@ def getitems_int(self, w_list): return self.unerase(w_list.lstorage) - _orig_find = find def find(self, w_list, w_obj, start, stop): # Find an element in this integer list. 
For integers, floats, and longs, # we can use primitive comparisons (possibly after a conversion to an @@ -1584,7 +1583,7 @@ raise ValueError elif self.space.type(w_obj).compares_by_identity(): raise ValueError - return self._orig_find(w_list, w_obj, start, stop) + return ListStrategy.find(self, w_list, w_obj, start, stop) _base_extend_from_list = _extend_from_list @@ -1630,7 +1629,6 @@ def list_is_correct_type(self, w_list): return w_list.strategy is self.space.fromcache(FloatListStrategy) - _orig_find = find def find(self, w_list, w_obj, start, stop): w_objt = type(w_obj) if w_objt is W_FloatObject: @@ -1641,7 +1639,7 @@ raise ValueError elif self.space.type(w_obj).compares_by_identity(): raise ValueError - return self._orig_find(w_list, w_obj, start, stop) + return ListStrategy.find(self, w_list, w_obj, start, stop) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) From noreply at buildbot.pypy.org Fri Nov 8 11:41:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 11:41:09 +0100 (CET) Subject: [pypy-commit] pypy default: Attempting to reduce the randomness shown on codespeed: split the Message-ID: <20131108104109.6C1CA1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67882:456028ed293d Date: 2013-11-08 11:40 +0100 http://bitbucket.org/pypy/pypy/changeset/456028ed293d/ Log: Attempting to reduce the randomness shown on codespeed: split the timetable in two parts, one reserved for entering new loops and the other reserved for guards. 
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -514,7 +514,8 @@ # if metainterp_sd.warmrunnerdesc is not None: # for tests jitcounter = metainterp_sd.warmrunnerdesc.jitcounter - self.status = jitcounter.fetch_next_index() << self.ST_SHIFT + index = jitcounter.in_second_half(jitcounter.fetch_next_index()) + self.status = index << self.ST_SHIFT def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE @@ -598,7 +599,7 @@ hash = (current_object_addr_as_int(self) * 777767777 + intval * 1442968193) - index = jitcounter.get_index(hash) + index = jitcounter.in_second_half(jitcounter.get_index(hash)) # increment = jitdriver_sd.warmstate.increment_trace_eagerness return jitcounter.tick(index, increment) diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -18,11 +18,20 @@ while (UINT32MAX >> self.shift) != size - 1: self.shift += 1 assert self.shift < 999, "size is not a power of two <= 2**31" - self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), size, + # + # The table of timings. The first half is used for starting the + # compilation of new loops. The second half is used for turning + # failing guards into bridges. The two halves are split to avoid + # too much interference. 
+ self.timetablesize = size * 2 + self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), + self.timetablesize, flavor='raw', zero=True, track_allocation=False) + self._nextindex = r_uint(0) + # + # The table of JitCell entries, recording already-compiled loops self.celltable = [None] * size - self._nextindex = r_uint(0) # if translator is not None: class Glob: @@ -61,6 +70,10 @@ self._nextindex = (result + 1) & self.get_index(-1) return result + def in_second_half(self, index): + assert index < r_uint(self.size) + return self.size + index + def tick(self, index, increment): counter = float(self.timetable[index]) + increment if counter < 1.0: @@ -112,7 +125,7 @@ # important in corner cases where we would suddenly compile more # than one loop because all counters reach the bound at the same # time, but where compiling all but the first one is pointless. - size = self.size + size = self.timetablesize pypy__decay_jit_counters(self.timetable, self.decay_by_mult, size) @@ -152,6 +165,10 @@ "NOT_RPYTHON" pass + def in_second_half(self, index): + "NOT_RPYTHON" + return index + 12345 + def _clear_all(self): self.timetable.clear() self.celltable.clear() From noreply at buildbot.pypy.org Fri Nov 8 14:10:02 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 8 Nov 2013 14:10:02 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: move numpy compat build step from benchmark to NativeNumpyTests Message-ID: <20131108131002.E0AAC1C00F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r879:3ca416f65160 Date: 2013-11-08 15:09 +0200 http://bitbucket.org/pypy/buildbot/changeset/3ca416f65160/ Log: move numpy compat build step from benchmark to NativeNumpyTests diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -642,20 +642,6 @@ locks=[lock.access('counting')], ) ) - if host == 'tannit': - pypy_c_rel = 'build/pypy/goal/pypy-c' - self.addStep(ShellCmd( - 
env={'PYTHONPATH': './benchmarks/lib/jinja2'}, - description="measure numpy compatibility", - command=[pypy_c_rel, - 'build/pypy/module/micronumpy/tool/numready/', - pypy_c_rel, 'numpy-compat.html'], - workdir=".")) - resfile = os.path.expanduser("~/numpy_compat/%(got_revision)s.html") - self.addStep(NumpyStatusUpload( - slavesrc="numpy-compat.html", - masterdest=WithProperties(resfile), - workdir=".")) pypy_c_rel = "../build/pypy/goal/pypy-c" self.addStep(ShellCmd( # this step needs exclusive access to the CPU @@ -801,6 +787,7 @@ ''' def __init__(self, platform='linux', app_tests=False, + host = 'tannit', lib_python=False, pypyjit=True, prefix=None, @@ -869,3 +856,16 @@ workdir='install', #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? )) + if host == 'tannit': + pypy_c_rel = 'install/bin/python' + self.addStep(ShellCmd( + description="measure numpy compatibility", + command=[pypy_c_rel, + 'numpy_src/tools/numready/', + pypy_c_rel, 'numpy-compat.html'], + workdir=".")) + resfile = os.path.expanduser("~/numpy_compat/%(got_revision)s.html") + self.addStep(NumpyStatusUpload( + slavesrc="numpy-compat.html", + masterdest=WithProperties(resfile), + workdir=".")) From noreply at buildbot.pypy.org Fri Nov 8 17:20:33 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Fri, 8 Nov 2013 17:20:33 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Collapse two identical cases. Message-ID: <20131108162033.2F2921C019D@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67883:31b5f4d5ba4b Date: 2013-11-08 11:34 +0000 http://bitbucket.org/pypy/pypy/changeset/31b5f4d5ba4b/ Log: Collapse two identical cases. No functional change. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1579,9 +1579,8 @@ # list. 
raise ValueError return self._safe_find(w_list, intv, start, stop) - elif w_objt is W_StringObject or w_objt is W_UnicodeObject: - raise ValueError - elif self.space.type(w_obj).compares_by_identity(): + elif w_objt is W_StringObject or w_objt is W_UnicodeObject \ + or self.space.type(w_obj).compares_by_identity(): raise ValueError return ListStrategy.find(self, w_list, w_obj, start, stop) @@ -1635,9 +1634,8 @@ return self._safe_find(w_list, self.unwrap(w_obj), start, stop) elif w_objt is W_IntObject or w_objt is W_LongObject: return self._safe_find(w_list, w_obj.float_w(self.space), start, stop) - elif w_objt is W_StringObject or w_objt is W_UnicodeObject: - raise ValueError - elif self.space.type(w_obj).compares_by_identity(): + elif w_objt is W_StringObject or w_objt is W_UnicodeObject \ + or self.space.type(w_obj).compares_by_identity(): raise ValueError return ListStrategy.find(self, w_list, w_obj, start, stop) From noreply at buildbot.pypy.org Fri Nov 8 17:20:34 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Fri, 8 Nov 2013 17:20:34 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Be more conservative about comparing floats within an integer list. Message-ID: <20131108162034.7897A1C1402@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67884:08eb5e457fba Date: 2013-11-08 16:03 +0000 http://bitbucket.org/pypy/pypy/changeset/08eb5e457fba/ Log: Be more conservative about comparing floats within an integer list. Very large floats can have no representation as a machine integer on a 64 bit machine. Rather than encoding lots of clever logic, be simple and conservative: any float whose representation as an int compares true to the original float can go through the fast path. Otherwise use the slow path. 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1555,13 +1555,18 @@ return self._safe_find(w_list, self.unwrap(w_obj), start, stop) elif w_objt is W_FloatObject or w_objt is W_LongObject: if w_objt is W_FloatObject: - # Floats with a fractional part can never compare True with - # respect to an integer, so we convert the float to an int and - # see if it compares True to itself or not. If it doesn't, we - # can immediately bail out. + # We take a conservative approach to floats. Any float which, + # when converted into an integer compares true to the + # original float, can be compared using a fast case. When that + # isn't true, it either means the float is fractional or it's + # got to the range that doubles can't accurately represent + # (e.g. float(2**53+1) == 2**53+1 evaluates to False). Rather + # than encoding potentially platform dependent stuff here, we + # simply fall back on the slow-case to be sure we're not + # unintentionally changing number semantics. 
w_objn = self.space.int(w_obj) if not self.space.eq_w(w_obj, w_objn): - raise ValueError + return ListStrategy.find(self, w_list, w_obj, start, stop) w_obj = w_objn # Asking for an int from a W_FloatObject can return either a # W_IntObject or W_LongObject, so we then need to disambiguate From noreply at buildbot.pypy.org Fri Nov 8 17:23:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 17:23:23 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Adding more direct support for C99-style arrays inside structs Message-ID: <20131108162323.2D3361C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1382:917a9131f639 Date: 2013-11-06 15:17 +0100 http://bitbucket.org/cffi/cffi/changeset/917a9131f639/ Log: Adding more direct support for C99-style arrays inside structs From noreply at buildbot.pypy.org Fri Nov 8 17:23:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 17:23:24 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Some target tests Message-ID: <20131108162324.654001C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1383:557359f8477c Date: 2013-11-06 15:18 +0100 http://bitbucket.org/cffi/cffi/changeset/557359f8477c/ Log: Some target tests diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -479,32 +479,71 @@ s = ffi.new("struct foo_s *") assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') -def test_struct_array_guess_length(): +def test_struct_array_no_length(): ffi = FFI() - ffi.cdef("struct foo_s { int a[]; ...; };") # <= no declared length - ffi.verify("struct foo_s { int x; int a[17]; int y; };") - assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') - s = ffi.new("struct foo_s *") - assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') - -def test_struct_array_guess_length_2(): - ffi = FFI() - ffi.cdef("struct foo_s { int a[]; ...; };\n" # <= no declared length + ffi.cdef("struct 
foo_s { int a[]; int y; ...; };\n" "int bar(struct foo_s *);\n") lib = ffi.verify("struct foo_s { int x; int a[17]; int y; };\n" "int bar(struct foo_s *f) { return f->a[14]; }\n") assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') s = ffi.new("struct foo_s *") + assert ffi.typeof(s.a) is ffi.typeof('int *') # because no length s.a[14] = 4242 assert lib.bar(s) == 4242 + # with no declared length, out-of-bound accesses are not detected + s.a[17] = -521 + assert s.y == s.a[17] == -521 + # + s = ffi.new("struct foo_s *", {'a': list(range(17))}) + assert s.a[16] == 16 + # overflows at construction time not detected either + s = ffi.new("struct foo_s *", {'a': list(range(18))}) + assert s.y == s.a[17] == 17 -def test_struct_array_guess_length_3(): +def test_struct_array_guess_length(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; };") ffi.verify("struct foo_s { int x; int a[17]; int y; };") assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') s = ffi.new("struct foo_s *") assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') + py.test.raises(IndexError, 's.a[17]') + +def test_struct_array_c99_1(): + if sys.platform == 'win32': + py.test.skip("requires C99") + ffi = FFI() + ffi.cdef("struct foo_s { int x; int a[]; };") + ffi.verify("struct foo_s { int x; int a[]; };") + assert ffi.sizeof('struct foo_s') == 1 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242, 4]) + assert ffi.sizeof(s) == 5 * ffi.sizeof('int') + assert s.a[3] == 0 + s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) + assert ffi.sizeof(s) == 5 * ffi.sizeof('int') + assert s.a[3] == -10 + s = ffi.new("struct foo_s *") + assert ffi.sizeof(s) == 1 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242]) + assert ffi.sizeof(s) == 1 * ffi.sizeof('int') + +def test_struct_array_c99_2(): + if sys.platform == 'win32': + py.test.skip("requires C99") + ffi = FFI() + ffi.cdef("struct foo_s { int x; int a[]; ...; };") + ffi.verify("struct foo_s { int x, y; int a[]; };") + 
assert ffi.sizeof('struct foo_s') == 2 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242, 4]) + assert ffi.sizeof(s) == 6 * ffi.sizeof('int') + assert s.a[3] == 0 + s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) + assert ffi.sizeof(s) == 6 * ffi.sizeof('int') + assert s.a[3] == -10 + s = ffi.new("struct foo_s *") + assert ffi.sizeof(s) == 2 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242]) + assert ffi.sizeof(s) == 2 * ffi.sizeof('int') def test_struct_ptr_to_array_field(): ffi = FFI() From noreply at buildbot.pypy.org Fri Nov 8 17:23:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 17:23:25 +0100 (CET) Subject: [pypy-commit] cffi c99-array: in-progress Message-ID: <20131108162325.8E5B01C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1384:1a0885eace01 Date: 2013-11-08 17:22 +0100 http://bitbucket.org/cffi/cffi/changeset/1a0885eace01/ Log: in-progress diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -508,7 +508,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' # raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -212,10 +212,10 @@ self.item = item self.length = length # - if self.length is None: + if length is None or length == '...': brackets = '&[]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -223,6 +223,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . 
import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -300,20 +304,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -324,18 +329,17 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + bitemsize = ffi.sizeof(BFieldType) if bitemsize != fsize: self._verification_error( "field '%s.%s' is declared as %d bytes, but is " "really %d bytes" % (self.name, self.fldnames[i] or '{}', bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) From noreply at buildbot.pypy.org Fri Nov 8 19:48:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 
8 Nov 2013 19:48:54 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Implementation: support a few extra ways to give initialization arguments to ffi.new() Message-ID: <20131108184854.A12221C1473@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1385:fb634a3e8dfc Date: 2013-11-08 19:48 +0100 http://bitbucket.org/cffi/cffi/changeset/fb634a3e8dfc/ Log: Implementation: support a few extra ways to give initialization arguments to ffi.new() for var-sized structs. See the test. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -86,6 +86,7 @@ #define CT_IS_BOOL 131072 #define CT_IS_FILE 262144 #define CT_IS_VOID_PTR 524288 +#define CT_WITH_VAR_ARRAY 1048576 #define CT_PRIMITIVE_ANY (CT_PRIMITIVE_SIGNED | \ CT_PRIMITIVE_UNSIGNED | \ CT_PRIMITIVE_CHAR | \ @@ -1007,9 +1008,39 @@ static int /* forward */ convert_from_object_bitfield(char *data, CFieldObject *cf, PyObject *init); +static Py_ssize_t +get_new_array_length(PyObject **pvalue) +{ + PyObject *value = *pvalue; + + if (PyList_Check(value) || PyTuple_Check(value)) { + return PySequence_Fast_GET_SIZE(value); + } + else if (PyBytes_Check(value)) { + /* from a string, we add the null terminator */ + return PyBytes_GET_SIZE(value) + 1; + } + else if (PyUnicode_Check(value)) { + /* from a unicode, we add the null terminator */ + return _my_PyUnicode_SizeAsWideChar(value) + 1; + } + else { + Py_ssize_t explicitlength; + explicitlength = PyNumber_AsSsize_t(value, PyExc_OverflowError); + if (explicitlength < 0) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_ValueError, "negative array length"); + return -1; + } + *pvalue = Py_None; + return explicitlength; + } +} + static int convert_field_from_object(char *data, CFieldObject *cf, PyObject *value) { + data += cf->cf_offset; if (cf->cf_bitshift >= 0) return convert_from_object_bitfield(data, cf, value); else @@ -1017,6 +1048,45 @@ } static int +convert_vfield_from_object(char *data, 
CFieldObject *cf, PyObject *value, + Py_ssize_t *optvarsize) +{ + /* a special case for var-sized C99 arrays */ + if ((cf->cf_type->ct_flags & CT_ARRAY) && cf->cf_type->ct_size < 0) { + Py_ssize_t varsizelength = get_new_array_length(&value); + if (varsizelength < 0) + return -1; + if (optvarsize != NULL) { + /* in this mode, the only purpose of this function is to compute + the real size of the structure from a var-sized C99 array */ + Py_ssize_t size, itemsize; + assert(data == NULL); + itemsize = cf->cf_type->ct_itemdescr->ct_size; + size = cf->cf_offset + itemsize * varsizelength; + if (size < 0 || + ((size - cf->cf_offset) / itemsize) != varsizelength) { + PyErr_SetString(PyExc_OverflowError, + "array size would overflow a Py_ssize_t"); + return -1; + } + if (size > *optvarsize) + *optvarsize = size; + return 0; + } + /* if 'value' was only an integer, get_new_array_length() returns + it and convert 'value' to be None. Detect if this was the case, + and if so, stop here, leaving the content uninitialized + (it should be zero-initialized from somewhere else). 
*/ + if (value == Py_None) + return 0; + } + if (optvarsize == NULL) + return convert_field_from_object(data, cf, value); + else + return 0; +} + +static int convert_array_from_object(char *data, CTypeDescrObject *ct, PyObject *init) { /* used by convert_from_object(), and also to decode lists/tuples/unicodes @@ -1097,6 +1167,63 @@ } static int +convert_struct_from_object(char *data, CTypeDescrObject *ct, PyObject *init, + Py_ssize_t *optvarsize) +{ + const char *expected; + + if (ct->ct_flags & CT_UNION) { + Py_ssize_t n = PyObject_Size(init); + if (n < 0) + return -1; + if (n > 1) { + PyErr_Format(PyExc_ValueError, + "initializer for '%s': %zd items given, but " + "only one supported (use a dict if needed)", + ct->ct_name, n); + return -1; + } + } + if (PyList_Check(init) || PyTuple_Check(init)) { + PyObject **items = PySequence_Fast_ITEMS(init); + Py_ssize_t i, n = PySequence_Fast_GET_SIZE(init); + CFieldObject *cf = (CFieldObject *)ct->ct_extra; + + for (i=0; ict_name, n); + return -1; + } + if (convert_vfield_from_object(data, cf, items[i], optvarsize) < 0) + return -1; + cf = cf->cf_next; + } + return 0; + } + if (PyDict_Check(init)) { + PyObject *d_key, *d_value; + Py_ssize_t i = 0; + CFieldObject *cf; + + while (PyDict_Next(init, &i, &d_key, &d_value)) { + cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, d_key); + if (cf == NULL) { + PyErr_SetObject(PyExc_KeyError, d_key); + return -1; + } + if (convert_vfield_from_object(data, cf, d_value, optvarsize) < 0) + return -1; + } + return 0; + } + expected = optvarsize == NULL ? 
"list or tuple or dict or struct-cdata" + : "list or tuple or dict"; + return _convert_error(init, ct->ct_name, expected); +} + +static int convert_from_object(char *data, CTypeDescrObject *ct, PyObject *init) { const char *expected; @@ -1209,56 +1336,7 @@ return 0; } } - if (ct->ct_flags & CT_UNION) { - Py_ssize_t n = PyObject_Size(init); - if (n < 0) - return -1; - if (n > 1) { - PyErr_Format(PyExc_ValueError, - "initializer for '%s': %zd items given, but " - "only one supported (use a dict if needed)", - ct->ct_name, n); - return -1; - } - } - if (PyList_Check(init) || PyTuple_Check(init)) { - PyObject **items = PySequence_Fast_ITEMS(init); - Py_ssize_t i, n = PySequence_Fast_GET_SIZE(init); - CFieldObject *cf = (CFieldObject *)ct->ct_extra; - - for (i=0; ict_name, n); - return -1; - } - if (convert_field_from_object(data + cf->cf_offset, - cf, items[i]) < 0) - return -1; - cf = cf->cf_next; - } - return 0; - } - if (PyDict_Check(init)) { - PyObject *d_key, *d_value; - Py_ssize_t i = 0; - CFieldObject *cf; - - while (PyDict_Next(init, &i, &d_key, &d_value)) { - cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, d_key); - if (cf == NULL) { - PyErr_SetObject(PyExc_KeyError, d_key); - return -1; - } - if (convert_field_from_object(data + cf->cf_offset, - cf, d_value) < 0) - return -1; - } - return 0; - } - expected = "list or tuple or dict or struct-cdata"; - goto cannot_convert; + return convert_struct_from_object(data, ct, init, NULL); } PyErr_Format(PyExc_SystemError, "convert_from_object: '%s'", ct->ct_name); @@ -2068,9 +2146,8 @@ cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, attr); if (cf != NULL) { /* write the field 'cf' */ - char *data = cd->c_data + cf->cf_offset; if (value != NULL) { - return convert_field_from_object(data, cf, value); + return convert_field_from_object(cd->c_data, cf, value); } else { PyErr_SetString(PyExc_AttributeError, @@ -2642,32 +2719,21 @@ } if (ctitem->ct_flags & CT_PRIMITIVE_CHAR) datasize *= 2; /* forcefully add another 
character: a null */ + + if ((ctitem->ct_flags & CT_WITH_VAR_ARRAY) && init != Py_None) { + Py_ssize_t optvarsize = datasize; + if (convert_struct_from_object(NULL,ctitem, init, &optvarsize) < 0) + return NULL; + datasize = optvarsize; + } } else if (ct->ct_flags & CT_ARRAY) { dataoffset = offsetof(CDataObject_own_nolength, alignment); datasize = ct->ct_size; if (datasize < 0) { - if (PyList_Check(init) || PyTuple_Check(init)) { - explicitlength = PySequence_Fast_GET_SIZE(init); - } - else if (PyBytes_Check(init)) { - /* from a string, we add the null terminator */ - explicitlength = PyBytes_GET_SIZE(init) + 1; - } - else if (PyUnicode_Check(init)) { - /* from a unicode, we add the null terminator */ - explicitlength = _my_PyUnicode_SizeAsWideChar(init) + 1; - } - else { - explicitlength = PyNumber_AsSsize_t(init, PyExc_OverflowError); - if (explicitlength < 0) { - if (!PyErr_Occurred()) - PyErr_SetString(PyExc_ValueError, - "negative array length"); - return NULL; - } - init = Py_None; - } + explicitlength = get_new_array_length(&init); + if (explicitlength < 0) + return NULL; ctitem = ct->ct_itemdescr; dataoffset = offsetof(CDataObject_own_length, alignment); datasize = explicitlength * ctitem->ct_size; @@ -3554,11 +3620,17 @@ goto error; if (ftype->ct_size < 0) { - PyErr_Format(PyExc_TypeError, - "field '%s.%s' has ctype '%s' of unknown size", - ct->ct_name, PyText_AS_UTF8(fname), - ftype->ct_name); - goto error; + if ((ftype->ct_flags & CT_ARRAY) && fbitsize < 0 + && i == nb_fields - 1) { + ct->ct_flags |= CT_WITH_VAR_ARRAY; + } + else { + PyErr_Format(PyExc_TypeError, + "field '%s.%s' has ctype '%s' of unknown size", + ct->ct_name, PyText_AS_UTF8(fname), + ftype->ct_name); + goto error; + } } if (is_union) @@ -3632,7 +3704,8 @@ goto error; previous = &(*previous)->cf_next; } - boffset += ftype->ct_size * 8; + if (ftype->ct_size >= 0) + boffset += ftype->ct_size * 8; prev_bitfield_size = 0; } else { diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ 
b/c/test_c.py @@ -2963,6 +2963,94 @@ _test_bitfield_details(flag=4) +def test_struct_array_no_length(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + BStruct = new_struct_type("foo") + py.test.raises(TypeError, complete_struct_or_union, + BStruct, [('x', BArray), + ('y', BInt)]) + # + BStruct = new_struct_type("foo") + complete_struct_or_union(BStruct, [('x', BInt), + ('y', BArray)]) + assert sizeof(BStruct) == size_of_int() + d = BStruct.fields + assert len(d) == 2 + assert d[0][0] == 'x' + assert d[0][1].type is BInt + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'y' + assert d[1][1].type is BArray + assert d[1][1].offset == size_of_int() + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + # + p = newp(new_pointer_type(BStruct)) + p.x = 42 + assert p.x == 42 + assert typeof(p.y) is BIntP + assert p.y == cast(BIntP, p) + 1 + # + p = newp(new_pointer_type(BStruct), [100]) + assert p.x == 100 + # + # Tests for + # ffi.new("struct_with_var_array *", [field.., [the_array_items..]]) + # ffi.new("struct_with_var_array *", [field.., array_size]) + plist = [] + for i in range(20): + if i % 2 == 0: + p = newp(new_pointer_type(BStruct), [100, [200, i, 400]]) + else: + p = newp(new_pointer_type(BStruct), [100, 3]) + p.y[1] = i + p.y[0] = 200 + assert p.y[2] == 0 + p.y[2] = 400 + plist.append(p) + for i in range(20): + p = plist[i] + assert p.x == 100 + assert p.y[0] == 200 + assert p.y[1] == i + assert p.y[2] == 400 + assert list(p.y[0:3]) == [200, i, 400] + # + # the following assignment works, as it normally would, for any array field + p.y = [500, 600] + assert list(p.y[0:3]) == [500, 600, 400] + # + # error cases + py.test.raises(TypeError, "p.y = cast(BIntP, 0)") + py.test.raises(TypeError, "p.y = 15") + py.test.raises(TypeError, "p.y = None") + # + # accepting this may be specified by the C99 standard, + # or a GCC strangeness... 
+ BStruct2 = new_struct_type("bar") + complete_struct_or_union(BStruct2, [('f', BStruct), + ('n', BInt)]) + p = newp(new_pointer_type(BStruct2), {'n': 42}) + assert p.n == 42 + # + # more error cases + py.test.raises(TypeError, newp, new_pointer_type(BStruct), [100, None]) + BArray4 = new_array_type(BIntP, 4) + BStruct4 = new_struct_type("test4") + complete_struct_or_union(BStruct4, [('a', BArray4)]) # not varsized + py.test.raises(TypeError, newp, new_pointer_type(BStruct4), [None]) + py.test.raises(TypeError, newp, new_pointer_type(BStruct4), [4]) + p = newp(new_pointer_type(BStruct4), [[10, 20, 30]]) + assert p.a[0] == 10 + assert p.a[1] == 20 + assert p.a[2] == 30 + assert p.a[3] == 0 + + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.7" From noreply at buildbot.pypy.org Fri Nov 8 19:56:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 19:56:00 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Allow varsized arrays to appear at a non-last position in case of a Message-ID: <20131108185600.C557E1C1473@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1386:8bc8dd043806 Date: 2013-11-08 19:55 +0100 http://bitbucket.org/cffi/cffi/changeset/8bc8dd043806/ Log: Allow varsized arrays to appear at a non-last position in case of a verify()ed structure diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3621,7 +3621,7 @@ if (ftype->ct_size < 0) { if ((ftype->ct_flags & CT_ARRAY) && fbitsize < 0 - && i == nb_fields - 1) { + && (i == nb_fields - 1 || foffset != -1)) { ct->ct_flags |= CT_WITH_VAR_ARRAY; } else { diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3050,6 +3050,45 @@ assert p.a[2] == 30 assert p.a[3] == 0 +def test_struct_array_no_length_explicit_position(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + BStruct = new_struct_type("foo") + 
complete_struct_or_union(BStruct, [('x', BArray, -1, 0), # actually 3 items + ('y', BInt, -1, 12)]) + p = newp(new_pointer_type(BStruct), [[10, 20], 30]) + assert p.x[0] == 10 + assert p.x[1] == 20 + assert p.x[2] == 0 + assert p.y == 30 + p = newp(new_pointer_type(BStruct), {'x': [40], 'y': 50}) + assert p.x[0] == 40 + assert p.x[1] == 0 + assert p.x[2] == 0 + assert p.y == 50 + p = newp(new_pointer_type(BStruct), {'y': 60}) + assert p.x[0] == 0 + assert p.x[1] == 0 + assert p.x[2] == 0 + assert p.y == 60 + # + # This "should" work too, allocating a larger structure + # (a bit strange in this case, but useful in general) + plist = [] + for i in range(20): + p = newp(new_pointer_type(BStruct), [[10, 20, 30, 40, 50, 60, 70]]) + plist.append(p) + for i in range(20): + p = plist[i] + assert p.x[0] == 10 + assert p.x[1] == 20 + assert p.x[2] == 30 + assert p.x[3] == 40 == p.y + assert p.x[4] == 50 + assert p.x[5] == 60 + assert p.x[6] == 70 + def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Fri Nov 8 20:47:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 20:47:20 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Add a test, now passing. Fixes in the front-end. Message-ID: <20131108194720.C09931C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1387:1737e2a72c02 Date: 2013-11-08 19:58 +0100 http://bitbucket.org/cffi/cffi/changeset/1737e2a72c02/ Log: Add a test, now passing. Fixes in the front-end. diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -327,17 +327,21 @@ "field '%s.%s' has a bogus size?" 
% ( self.name, self.fldnames[i] or '{}')) ftype = ftype.resolve_length(nlen) + fsize = 0 self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) # BFieldType = ftype.get_cached_btype(ffi, finishlist) - bitemsize = ffi.sizeof(BFieldType) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) fldtypes.append(BFieldType) # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -464,11 +464,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -184,3 +184,13 @@ ffi.cdef("typedef struct { float x; } foo_t;") p = ffi.new("foo_t *", [5.2]) assert repr(p).startswith(" Author: Armin Rigo Branch: c99-array Changeset: r1388:5396584e5493 Date: 2013-11-08 20:09 +0100 http://bitbucket.org/cffi/cffi/changeset/5396584e5493/ Log: 
Fix some tests diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -327,7 +327,6 @@ "field '%s.%s' has a bogus size?" % ( self.name, self.fldnames[i] or '{}')) ftype = ftype.resolve_length(nlen) - fsize = 0 self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) # diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -531,9 +531,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -255,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -319,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == 
len(layout) @@ -483,7 +487,7 @@ value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with # a if the N is actually known - if tp.length is not None: + if tp.length is not None and tp.length != '...': BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -517,15 +517,15 @@ ffi.verify("struct foo_s { int x; int a[]; };") assert ffi.sizeof('struct foo_s') == 1 * ffi.sizeof('int') s = ffi.new("struct foo_s *", [424242, 4]) - assert ffi.sizeof(s) == 5 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') # the same in C assert s.a[3] == 0 s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) - assert ffi.sizeof(s) == 5 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') assert s.a[3] == -10 s = ffi.new("struct foo_s *") - assert ffi.sizeof(s) == 1 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') s = ffi.new("struct foo_s *", [424242]) - assert ffi.sizeof(s) == 1 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') def test_struct_array_c99_2(): if sys.platform == 'win32': @@ -535,15 +535,15 @@ ffi.verify("struct foo_s { int x, y; int a[]; };") assert ffi.sizeof('struct foo_s') == 2 * ffi.sizeof('int') s = ffi.new("struct foo_s *", [424242, 4]) - assert ffi.sizeof(s) == 6 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') assert s.a[3] == 0 s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) - assert ffi.sizeof(s) == 6 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') assert s.a[3] == -10 s = ffi.new("struct foo_s *") - assert ffi.sizeof(s) == 2 * ffi.sizeof('int') + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') s = ffi.new("struct foo_s *", [424242]) - assert ffi.sizeof(s) == 2 * ffi.sizeof('int') + assert 
ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') def test_struct_ptr_to_array_field(): ffi = FFI() From noreply at buildbot.pypy.org Fri Nov 8 20:47:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 20:47:23 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Fix vengine_gen for global arrays with [...]. Message-ID: <20131108194723.416C51C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1389:2a2ec0277dbf Date: 2013-11-08 20:29 +0100 http://bitbucket.org/cffi/cffi/changeset/2a2ec0277dbf/ Log: Fix vengine_gen for global arrays with [...]. diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -472,6 +472,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: @@ -483,11 +491,23 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." 
is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi.typeof('size_t(*)(void)') + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with # a if the N is actually known - if tp.length is not None and tp.length != '...': + if tp.length is not None: BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1509,7 +1509,12 @@ ffi = FFI() ffi.cdef("int fooarray[...];") lib = ffi.verify("int fooarray[50];") - assert repr(lib.fooarray).startswith(" Author: Armin Rigo Branch: c99-array Changeset: r1390:54a2912d349e Date: 2013-11-08 20:46 +0100 http://bitbucket.org/cffi/cffi/changeset/54a2912d349e/ Log: Fix vengine_cpy too. 
diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -280,8 +280,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -570,7 +570,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -601,6 +601,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -681,15 +690,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -698,11 
+708,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] From noreply at buildbot.pypy.org Fri Nov 8 20:47:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 20:47:27 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Fix test. Message-ID: <20131108194727.759551C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1391:9d6086e590d4 Date: 2013-11-08 20:46 +0100 http://bitbucket.org/cffi/cffi/changeset/9d6086e590d4/ Log: Fix test. 
diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1741,7 +1741,7 @@ ffi = FFI() ffi.cdef("""const char *a[...];""") lib = ffi.verify("""const char *a[5];""") - assert repr(ffi.typeof(lib.a)) == "" + assert repr(ffi.typeof(lib.a)) == "" def test_bug_const_char_ptr_array_2(): from cffi import FFI # ignore warnings From noreply at buildbot.pypy.org Fri Nov 8 21:18:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 21:18:45 +0100 (CET) Subject: [pypy-commit] cffi c99-array: Document the changes with C99-style arrays. Message-ID: <20131108201845.9AE411C030D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c99-array Changeset: r1392:029f4538b29f Date: 2013-11-08 21:18 +0100 http://bitbucket.org/cffi/cffi/changeset/029f4538b29f/ Log: Document the changes with C99-style arrays. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -573,9 +573,18 @@ ``foo_t`` is not opaque, but you just don't know any field in it; then you would use "``typedef struct { ...; } foo_t;``". -* array lengths: when used as structure fields, arrays can have an - unspecified length, as in "``int n[];``" or "``int n[...];``". - The length is completed by the C compiler. +* array lengths: when used as structure fields or in global variables, + arrays can have an unspecified length, as in "``int n[...];``". The + length is completed by the C compiler. (Only the outermost array + may have an unknown length, in case of array-of-array.) + You can also use the syntax "``int n[];``". + +.. versionchanged:: 0.8 + "``int n[];``" asks for an array of unknown length whose length must + *not* be completed by the C compiler. See `variable-length array`_ + below. If the structure does not contain the syntax ``...`` anywhere, + it will be not be considered to have a partial layout to complete by + the compiler. 
* enums: if you don't know the exact order (or values) of the declared constants, then use this syntax: "``enum foo { A, B, C, ... };``" @@ -1278,11 +1287,6 @@ * Thread-local variables (access them via getter/setter functions) -* Variable-length structures, i.e. whose last field is a variable-length - array (work around like in C, e.g. by declaring it as an array of - length 0, allocating a ``char[]`` of the correct size, and casting - it to a struct pointer) - .. versionadded:: 0.4 Now supported: the common GCC extension of anonymous nested structs/unions inside structs/unions. @@ -1297,6 +1301,20 @@ this by naming the largest value. A similar but less important problem involves negative values.* +.. _`variable-length array`: + +.. versionadded:: 0.8 + Now supported: variable-length structures, i.e. whose last field is + a variable-length array. + +Note that since version 0.8, declarations like ``int field[];`` in +structures are interpreted as variable-length structures. When used for +structures that are not, in fact, variable-length, it works too; in this +case, the difference with using ``int field[...];`` is that, as CFFI +believes it cannot ask the C compiler for the length of the array, you +get reduced safety checks: for example, you risk overwriting the +following fields by passing too many array items in the constructor. + Debugging dlopen'ed C libraries ------------------------------- From noreply at buildbot.pypy.org Fri Nov 8 21:29:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 21:29:47 +0100 (CET) Subject: [pypy-commit] cffi default: hg merge c99-array Message-ID: <20131108202947.549091C1402@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1393:3a53adb16e28 Date: 2013-11-08 21:29 +0100 http://bitbucket.org/cffi/cffi/changeset/3a53adb16e28/ Log: hg merge c99-array Adding more direct support for C99-style arrays inside structs. Non- fully-backward-compatible changes documented. 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -86,6 +86,7 @@ #define CT_IS_BOOL 131072 #define CT_IS_FILE 262144 #define CT_IS_VOID_PTR 524288 +#define CT_WITH_VAR_ARRAY 1048576 #define CT_PRIMITIVE_ANY (CT_PRIMITIVE_SIGNED | \ CT_PRIMITIVE_UNSIGNED | \ CT_PRIMITIVE_CHAR | \ @@ -1007,9 +1008,39 @@ static int /* forward */ convert_from_object_bitfield(char *data, CFieldObject *cf, PyObject *init); +static Py_ssize_t +get_new_array_length(PyObject **pvalue) +{ + PyObject *value = *pvalue; + + if (PyList_Check(value) || PyTuple_Check(value)) { + return PySequence_Fast_GET_SIZE(value); + } + else if (PyBytes_Check(value)) { + /* from a string, we add the null terminator */ + return PyBytes_GET_SIZE(value) + 1; + } + else if (PyUnicode_Check(value)) { + /* from a unicode, we add the null terminator */ + return _my_PyUnicode_SizeAsWideChar(value) + 1; + } + else { + Py_ssize_t explicitlength; + explicitlength = PyNumber_AsSsize_t(value, PyExc_OverflowError); + if (explicitlength < 0) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_ValueError, "negative array length"); + return -1; + } + *pvalue = Py_None; + return explicitlength; + } +} + static int convert_field_from_object(char *data, CFieldObject *cf, PyObject *value) { + data += cf->cf_offset; if (cf->cf_bitshift >= 0) return convert_from_object_bitfield(data, cf, value); else @@ -1017,6 +1048,45 @@ } static int +convert_vfield_from_object(char *data, CFieldObject *cf, PyObject *value, + Py_ssize_t *optvarsize) +{ + /* a special case for var-sized C99 arrays */ + if ((cf->cf_type->ct_flags & CT_ARRAY) && cf->cf_type->ct_size < 0) { + Py_ssize_t varsizelength = get_new_array_length(&value); + if (varsizelength < 0) + return -1; + if (optvarsize != NULL) { + /* in this mode, the only purpose of this function is to compute + the real size of the structure from a var-sized C99 array */ + Py_ssize_t size, itemsize; + assert(data == NULL); + itemsize = 
cf->cf_type->ct_itemdescr->ct_size; + size = cf->cf_offset + itemsize * varsizelength; + if (size < 0 || + ((size - cf->cf_offset) / itemsize) != varsizelength) { + PyErr_SetString(PyExc_OverflowError, + "array size would overflow a Py_ssize_t"); + return -1; + } + if (size > *optvarsize) + *optvarsize = size; + return 0; + } + /* if 'value' was only an integer, get_new_array_length() returns + it and convert 'value' to be None. Detect if this was the case, + and if so, stop here, leaving the content uninitialized + (it should be zero-initialized from somewhere else). */ + if (value == Py_None) + return 0; + } + if (optvarsize == NULL) + return convert_field_from_object(data, cf, value); + else + return 0; +} + +static int convert_array_from_object(char *data, CTypeDescrObject *ct, PyObject *init) { /* used by convert_from_object(), and also to decode lists/tuples/unicodes @@ -1097,6 +1167,63 @@ } static int +convert_struct_from_object(char *data, CTypeDescrObject *ct, PyObject *init, + Py_ssize_t *optvarsize) +{ + const char *expected; + + if (ct->ct_flags & CT_UNION) { + Py_ssize_t n = PyObject_Size(init); + if (n < 0) + return -1; + if (n > 1) { + PyErr_Format(PyExc_ValueError, + "initializer for '%s': %zd items given, but " + "only one supported (use a dict if needed)", + ct->ct_name, n); + return -1; + } + } + if (PyList_Check(init) || PyTuple_Check(init)) { + PyObject **items = PySequence_Fast_ITEMS(init); + Py_ssize_t i, n = PySequence_Fast_GET_SIZE(init); + CFieldObject *cf = (CFieldObject *)ct->ct_extra; + + for (i=0; ict_name, n); + return -1; + } + if (convert_vfield_from_object(data, cf, items[i], optvarsize) < 0) + return -1; + cf = cf->cf_next; + } + return 0; + } + if (PyDict_Check(init)) { + PyObject *d_key, *d_value; + Py_ssize_t i = 0; + CFieldObject *cf; + + while (PyDict_Next(init, &i, &d_key, &d_value)) { + cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, d_key); + if (cf == NULL) { + PyErr_SetObject(PyExc_KeyError, d_key); + return -1; + } + 
if (convert_vfield_from_object(data, cf, d_value, optvarsize) < 0) + return -1; + } + return 0; + } + expected = optvarsize == NULL ? "list or tuple or dict or struct-cdata" + : "list or tuple or dict"; + return _convert_error(init, ct->ct_name, expected); +} + +static int convert_from_object(char *data, CTypeDescrObject *ct, PyObject *init) { const char *expected; @@ -1209,56 +1336,7 @@ return 0; } } - if (ct->ct_flags & CT_UNION) { - Py_ssize_t n = PyObject_Size(init); - if (n < 0) - return -1; - if (n > 1) { - PyErr_Format(PyExc_ValueError, - "initializer for '%s': %zd items given, but " - "only one supported (use a dict if needed)", - ct->ct_name, n); - return -1; - } - } - if (PyList_Check(init) || PyTuple_Check(init)) { - PyObject **items = PySequence_Fast_ITEMS(init); - Py_ssize_t i, n = PySequence_Fast_GET_SIZE(init); - CFieldObject *cf = (CFieldObject *)ct->ct_extra; - - for (i=0; ict_name, n); - return -1; - } - if (convert_field_from_object(data + cf->cf_offset, - cf, items[i]) < 0) - return -1; - cf = cf->cf_next; - } - return 0; - } - if (PyDict_Check(init)) { - PyObject *d_key, *d_value; - Py_ssize_t i = 0; - CFieldObject *cf; - - while (PyDict_Next(init, &i, &d_key, &d_value)) { - cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, d_key); - if (cf == NULL) { - PyErr_SetObject(PyExc_KeyError, d_key); - return -1; - } - if (convert_field_from_object(data + cf->cf_offset, - cf, d_value) < 0) - return -1; - } - return 0; - } - expected = "list or tuple or dict or struct-cdata"; - goto cannot_convert; + return convert_struct_from_object(data, ct, init, NULL); } PyErr_Format(PyExc_SystemError, "convert_from_object: '%s'", ct->ct_name); @@ -2068,9 +2146,8 @@ cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, attr); if (cf != NULL) { /* write the field 'cf' */ - char *data = cd->c_data + cf->cf_offset; if (value != NULL) { - return convert_field_from_object(data, cf, value); + return convert_field_from_object(cd->c_data, cf, value); } else { 
PyErr_SetString(PyExc_AttributeError, @@ -2642,32 +2719,21 @@ } if (ctitem->ct_flags & CT_PRIMITIVE_CHAR) datasize *= 2; /* forcefully add another character: a null */ + + if ((ctitem->ct_flags & CT_WITH_VAR_ARRAY) && init != Py_None) { + Py_ssize_t optvarsize = datasize; + if (convert_struct_from_object(NULL,ctitem, init, &optvarsize) < 0) + return NULL; + datasize = optvarsize; + } } else if (ct->ct_flags & CT_ARRAY) { dataoffset = offsetof(CDataObject_own_nolength, alignment); datasize = ct->ct_size; if (datasize < 0) { - if (PyList_Check(init) || PyTuple_Check(init)) { - explicitlength = PySequence_Fast_GET_SIZE(init); - } - else if (PyBytes_Check(init)) { - /* from a string, we add the null terminator */ - explicitlength = PyBytes_GET_SIZE(init) + 1; - } - else if (PyUnicode_Check(init)) { - /* from a unicode, we add the null terminator */ - explicitlength = _my_PyUnicode_SizeAsWideChar(init) + 1; - } - else { - explicitlength = PyNumber_AsSsize_t(init, PyExc_OverflowError); - if (explicitlength < 0) { - if (!PyErr_Occurred()) - PyErr_SetString(PyExc_ValueError, - "negative array length"); - return NULL; - } - init = Py_None; - } + explicitlength = get_new_array_length(&init); + if (explicitlength < 0) + return NULL; ctitem = ct->ct_itemdescr; dataoffset = offsetof(CDataObject_own_length, alignment); datasize = explicitlength * ctitem->ct_size; @@ -3554,11 +3620,17 @@ goto error; if (ftype->ct_size < 0) { - PyErr_Format(PyExc_TypeError, - "field '%s.%s' has ctype '%s' of unknown size", - ct->ct_name, PyText_AS_UTF8(fname), - ftype->ct_name); - goto error; + if ((ftype->ct_flags & CT_ARRAY) && fbitsize < 0 + && (i == nb_fields - 1 || foffset != -1)) { + ct->ct_flags |= CT_WITH_VAR_ARRAY; + } + else { + PyErr_Format(PyExc_TypeError, + "field '%s.%s' has ctype '%s' of unknown size", + ct->ct_name, PyText_AS_UTF8(fname), + ftype->ct_name); + goto error; + } } if (is_union) @@ -3632,7 +3704,8 @@ goto error; previous = &(*previous)->cf_next; } - boffset += 
ftype->ct_size * 8; + if (ftype->ct_size >= 0) + boffset += ftype->ct_size * 8; prev_bitfield_size = 0; } else { diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2963,6 +2963,133 @@ _test_bitfield_details(flag=4) +def test_struct_array_no_length(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + BStruct = new_struct_type("foo") + py.test.raises(TypeError, complete_struct_or_union, + BStruct, [('x', BArray), + ('y', BInt)]) + # + BStruct = new_struct_type("foo") + complete_struct_or_union(BStruct, [('x', BInt), + ('y', BArray)]) + assert sizeof(BStruct) == size_of_int() + d = BStruct.fields + assert len(d) == 2 + assert d[0][0] == 'x' + assert d[0][1].type is BInt + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'y' + assert d[1][1].type is BArray + assert d[1][1].offset == size_of_int() + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + # + p = newp(new_pointer_type(BStruct)) + p.x = 42 + assert p.x == 42 + assert typeof(p.y) is BIntP + assert p.y == cast(BIntP, p) + 1 + # + p = newp(new_pointer_type(BStruct), [100]) + assert p.x == 100 + # + # Tests for + # ffi.new("struct_with_var_array *", [field.., [the_array_items..]]) + # ffi.new("struct_with_var_array *", [field.., array_size]) + plist = [] + for i in range(20): + if i % 2 == 0: + p = newp(new_pointer_type(BStruct), [100, [200, i, 400]]) + else: + p = newp(new_pointer_type(BStruct), [100, 3]) + p.y[1] = i + p.y[0] = 200 + assert p.y[2] == 0 + p.y[2] = 400 + plist.append(p) + for i in range(20): + p = plist[i] + assert p.x == 100 + assert p.y[0] == 200 + assert p.y[1] == i + assert p.y[2] == 400 + assert list(p.y[0:3]) == [200, i, 400] + # + # the following assignment works, as it normally would, for any array field + p.y = [500, 600] + assert list(p.y[0:3]) == [500, 600, 400] + # + # error cases + py.test.raises(TypeError, "p.y = cast(BIntP, 
0)") + py.test.raises(TypeError, "p.y = 15") + py.test.raises(TypeError, "p.y = None") + # + # accepting this may be specified by the C99 standard, + # or a GCC strangeness... + BStruct2 = new_struct_type("bar") + complete_struct_or_union(BStruct2, [('f', BStruct), + ('n', BInt)]) + p = newp(new_pointer_type(BStruct2), {'n': 42}) + assert p.n == 42 + # + # more error cases + py.test.raises(TypeError, newp, new_pointer_type(BStruct), [100, None]) + BArray4 = new_array_type(BIntP, 4) + BStruct4 = new_struct_type("test4") + complete_struct_or_union(BStruct4, [('a', BArray4)]) # not varsized + py.test.raises(TypeError, newp, new_pointer_type(BStruct4), [None]) + py.test.raises(TypeError, newp, new_pointer_type(BStruct4), [4]) + p = newp(new_pointer_type(BStruct4), [[10, 20, 30]]) + assert p.a[0] == 10 + assert p.a[1] == 20 + assert p.a[2] == 30 + assert p.a[3] == 0 + +def test_struct_array_no_length_explicit_position(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + BStruct = new_struct_type("foo") + complete_struct_or_union(BStruct, [('x', BArray, -1, 0), # actually 3 items + ('y', BInt, -1, 12)]) + p = newp(new_pointer_type(BStruct), [[10, 20], 30]) + assert p.x[0] == 10 + assert p.x[1] == 20 + assert p.x[2] == 0 + assert p.y == 30 + p = newp(new_pointer_type(BStruct), {'x': [40], 'y': 50}) + assert p.x[0] == 40 + assert p.x[1] == 0 + assert p.x[2] == 0 + assert p.y == 50 + p = newp(new_pointer_type(BStruct), {'y': 60}) + assert p.x[0] == 0 + assert p.x[1] == 0 + assert p.x[2] == 0 + assert p.y == 60 + # + # This "should" work too, allocating a larger structure + # (a bit strange in this case, but useful in general) + plist = [] + for i in range(20): + p = newp(new_pointer_type(BStruct), [[10, 20, 30, 40, 50, 60, 70]]) + plist.append(p) + for i in range(20): + p = plist[i] + assert p.x[0] == 10 + assert p.x[1] == 20 + assert p.x[2] == 30 + assert p.x[3] == 40 == p.y + assert p.x[4] == 50 + assert p.x[5] == 
60 + assert p.x[6] == 70 + + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.7" diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -508,7 +508,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' # raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -212,10 +212,10 @@ self.item = item self.length = length # - if self.length is None: + if length is None or length == '...': brackets = '&[]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -223,6 +223,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -300,20 +304,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the 
total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -324,18 +329,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -280,8 +280,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +464,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx 
ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -528,9 +531,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +570,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +601,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -677,15 +690,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, 
vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +708,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
- ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -255,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -319,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -468,6 +472,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: @@ -479,6 +491,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." 
is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi.typeof('size_t(*)(void)') + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -573,9 +573,18 @@ ``foo_t`` is not opaque, but you just don't know any field in it; then you would use "``typedef struct { ...; } foo_t;``". -* array lengths: when used as structure fields, arrays can have an - unspecified length, as in "``int n[];``" or "``int n[...];``". - The length is completed by the C compiler. +* array lengths: when used as structure fields or in global variables, + arrays can have an unspecified length, as in "``int n[...];``". The + length is completed by the C compiler. (Only the outermost array + may have an unknown length, in case of array-of-array.) + You can also use the syntax "``int n[];``". + +.. versionchanged:: 0.8 + "``int n[];``" asks for an array of unknown length whose length must + *not* be completed by the C compiler. See `variable-length array`_ + below. If the structure does not contain the syntax ``...`` anywhere, + it will be not be considered to have a partial layout to complete by + the compiler. * enums: if you don't know the exact order (or values) of the declared constants, then use this syntax: "``enum foo { A, B, C, ... };``" @@ -1278,11 +1287,6 @@ * Thread-local variables (access them via getter/setter functions) -* Variable-length structures, i.e. 
whose last field is a variable-length - array (work around like in C, e.g. by declaring it as an array of - length 0, allocating a ``char[]`` of the correct size, and casting - it to a struct pointer) - .. versionadded:: 0.4 Now supported: the common GCC extension of anonymous nested structs/unions inside structs/unions. @@ -1297,6 +1301,20 @@ this by naming the largest value. A similar but less important problem involves negative values.* +.. _`variable-length array`: + +.. versionadded:: 0.8 + Now supported: variable-length structures, i.e. whose last field is + a variable-length array. + +Note that since version 0.8, declarations like ``int field[];`` in +structures are interpreted as variable-length structures. When used for +structures that are not, in fact, variable-length, it works too; in this +case, the difference with using ``int field[...];`` is that, as CFFI +believes it cannot ask the C compiler for the length of the array, you +get reduced safety checks: for example, you risk overwriting the +following fields by passing too many array items in the constructor. 
+ Debugging dlopen'ed C libraries ------------------------------- diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -184,3 +184,13 @@ ffi.cdef("typedef struct { float x; } foo_t;") p = ffi.new("foo_t *", [5.2]) assert repr(p).startswith("a[14]; }\n") assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') s = ffi.new("struct foo_s *") + assert ffi.typeof(s.a) is ffi.typeof('int *') # because no length s.a[14] = 4242 assert lib.bar(s) == 4242 + # with no declared length, out-of-bound accesses are not detected + s.a[17] = -521 + assert s.y == s.a[17] == -521 + # + s = ffi.new("struct foo_s *", {'a': list(range(17))}) + assert s.a[16] == 16 + # overflows at construction time not detected either + s = ffi.new("struct foo_s *", {'a': list(range(18))}) + assert s.y == s.a[17] == 17 -def test_struct_array_guess_length_3(): +def test_struct_array_guess_length(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; };") ffi.verify("struct foo_s { int x; int a[17]; int y; };") assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') s = ffi.new("struct foo_s *") assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') + py.test.raises(IndexError, 's.a[17]') + +def test_struct_array_c99_1(): + if sys.platform == 'win32': + py.test.skip("requires C99") + ffi = FFI() + ffi.cdef("struct foo_s { int x; int a[]; };") + ffi.verify("struct foo_s { int x; int a[]; };") + assert ffi.sizeof('struct foo_s') == 1 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242, 4]) + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') # the same in C + assert s.a[3] == 0 + s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') + assert s.a[3] == -10 + s = ffi.new("struct foo_s *") + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242]) + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') + +def 
test_struct_array_c99_2(): + if sys.platform == 'win32': + py.test.skip("requires C99") + ffi = FFI() + ffi.cdef("struct foo_s { int x; int a[]; ...; };") + ffi.verify("struct foo_s { int x, y; int a[]; };") + assert ffi.sizeof('struct foo_s') == 2 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242, 4]) + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') + assert s.a[3] == 0 + s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') + assert s.a[3] == -10 + s = ffi.new("struct foo_s *") + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242]) + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') def test_struct_ptr_to_array_field(): ffi = FFI() @@ -1470,7 +1509,12 @@ ffi = FFI() ffi.cdef("int fooarray[...];") lib = ffi.verify("int fooarray[50];") - assert repr(lib.fooarray).startswith("" + assert repr(ffi.typeof(lib.a)) == "" def test_bug_const_char_ptr_array_2(): from cffi import FFI # ignore warnings From noreply at buildbot.pypy.org Fri Nov 8 21:47:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Nov 2013 21:47:23 +0100 (CET) Subject: [pypy-commit] cffi default: Test and fix for issue #99. Message-ID: <20131108204723.2512E1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1394:26390b01617e Date: 2013-11-08 21:47 +0100 http://bitbucket.org/cffi/cffi/changeset/26390b01617e/ Log: Test and fix for issue #99. 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -1939,6 +1939,22 @@ } } + /* A fast path for [0:N] = b"somestring", which also adds + support for Python 3: otherwise, you get integers while enumerating + the string, and you can't set them to characters :-/ + */ + if (PyBytes_Check(v) && (ct->ct_flags & CT_PRIMITIVE_CHAR) + && itemsize == sizeof(char)) { + if (PyBytes_GET_SIZE(v) != length) { + PyErr_Format(PyExc_ValueError, + "need a string of length %zd, got %zd", + length, PyBytes_GET_SIZE(v)); + return -1; + } + memcpy(cdata, PyBytes_AS_STRING(v), length); + return 0; + } + it = PyObject_GetIter(v); if (it == NULL) return -1; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3089,6 +3089,25 @@ assert p.x[5] == 60 assert p.x[6] == 70 +def test_ass_slice(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), None) + p = newp(BArray, b"foobar") + p[2:5] = [b"*", b"Z", b"T"] + p[1:3] = b"XY" + assert list(p) == [b"f", b"X", b"Y", b"Z", b"T", b"r", b"\x00"] + py.test.raises(TypeError, "p[1:5] = u+'XYZT'") + py.test.raises(TypeError, "p[1:5] = [1, 2, 3, 4]") + # + BUniChar = new_primitive_type("wchar_t") + BArray = new_array_type(new_pointer_type(BUniChar), None) + p = newp(BArray, u+"foobar") + p[2:5] = [u+"*", u+"Z", u+"T"] + p[1:3] = u+"XY" + assert list(p) == [u+"f", u+"X", u+"Y", u+"Z", u+"T", u+"r", u+"\x00"] + py.test.raises(TypeError, "p[1:5] = b'XYZT'") + py.test.raises(TypeError, "p[1:5] = [1, 2, 3, 4]") + def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Sat Nov 9 00:27:43 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 00:27:43 +0100 (CET) Subject: [pypy-commit] pypy default: Only set this key under some circumstances. 
Might make stuff a tiny tiny bit faster Message-ID: <20131108232743.02D721C019D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67885:d7386c6804ec Date: 2013-11-08 15:27 -0800 http://bitbucket.org/pypy/pypy/changeset/d7386c6804ec/ Log: Only set this key under some circumstances. Might make stuff a tiny tiny bit faster diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -96,8 +96,13 @@ idx += 1 def _escape(self, box): - if box in self.new_boxes: - self.new_boxes[box] = False + try: + unescaped = self.new_boxes[box] + except KeyError: + pass + else: + if unescaped: + self.new_boxes[box] = False try: deps = self.dependencies.pop(box) except KeyError: From noreply at buildbot.pypy.org Sat Nov 9 05:25:40 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 05:25:40 +0100 (CET) Subject: [pypy-commit] pypy default: Speed up array.__add__ and array.__iadd__ by using memcpy Message-ID: <20131109042540.2C1BD1C147A@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67886:e948603fa808 Date: 2013-11-08 20:25 -0800 http://bitbucket.org/pypy/pypy/changeset/e948603fa808/ Log: Speed up array.__add__ and array.__iadd__ by using memcpy diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -410,7 +410,6 @@ def descr_getslice(self, space, w_i, w_j): return space.getitem(self, space.newslice(w_i, w_j, space.w_None)) - def descr_setitem(self, space, w_idx, w_item): "x.__setitem__(i, y) <==> x[i]=y" if space.isinstance_w(w_idx, space.w_slice): @@ -913,10 +912,18 @@ return space.w_NotImplemented a = mytype.w_class(space) a.setlen(self.len + w_other.len, overallocate=False) - for i in range(self.len): - a.buffer[i] = self.buffer[i] - for i in range(w_other.len): - a.buffer[i + self.len] = 
w_other.buffer[i] + if self.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, a.buffer), + rffi.cast(rffi.VOIDP, self.buffer), + self.len * mytype.bytes + ) + if w_other.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(a.buffer, self.len)), + rffi.cast(rffi.VOIDP, w_other.buffer), + w_other.len * mytype.bytes + ) return a def descr_inplace_add(self, space, w_other): @@ -925,8 +932,12 @@ oldlen = self.len otherlen = w_other.len self.setlen(oldlen + otherlen) - for i in range(otherlen): - self.buffer[oldlen + i] = w_other.buffer[i] + if otherlen: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(self.buffer, oldlen)), + rffi.cast(rffi.VOIDP, w_other.buffer), + otherlen * mytype.bytes + ) return self def descr_mul(self, space, w_repeat): From noreply at buildbot.pypy.org Sat Nov 9 09:09:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 09:09:58 +0100 (CET) Subject: [pypy-commit] cffi default: Arithmetic using "void *" should work; at least it does in gcc without Message-ID: <20131109080958.1E3291C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1395:88d2b9436efd Date: 2013-11-09 09:09 +0100 http://bitbucket.org/cffi/cffi/changeset/88d2b9436efd/ Log: Arithmetic using "void *" should work; at least it does in gcc without warning. 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2050,7 +2050,7 @@ static PyObject * _cdata_add_or_sub(PyObject *v, PyObject *w, int sign) { - Py_ssize_t i; + Py_ssize_t i, itemsize; CDataObject *cd; CTypeDescrObject *ctptr; @@ -2073,14 +2073,19 @@ cd->c_type->ct_name); return NULL; } - if (ctptr->ct_itemdescr->ct_size < 0) { - PyErr_Format(PyExc_TypeError, - "ctype '%s' points to items of unknown size", - cd->c_type->ct_name); - return NULL; - } - return new_simple_cdata(cd->c_data + i * ctptr->ct_itemdescr->ct_size, - ctptr); + itemsize = ctptr->ct_itemdescr->ct_size; + if (itemsize < 0) { + if (ctptr->ct_flags & CT_IS_VOID_PTR) { + itemsize = 1; + } + else { + PyErr_Format(PyExc_TypeError, + "ctype '%s' points to items of unknown size", + cd->c_type->ct_name); + return NULL; + } + } + return new_simple_cdata(cd->c_data + i * itemsize, ctptr); not_implemented: Py_INCREF(Py_NotImplemented); @@ -2101,18 +2106,23 @@ CDataObject *cdw = (CDataObject *)w; CTypeDescrObject *ct = cdw->c_type; Py_ssize_t diff; + Py_ssize_t itemsize; if (ct->ct_flags & CT_ARRAY) /* ptr_to_T - array_of_T: ok */ ct = (CTypeDescrObject *)ct->ct_stuff; + itemsize = ct->ct_itemdescr->ct_size; + if (ct->ct_flags & CT_IS_VOID_PTR) + itemsize = 1; + if (ct != cdv->c_type || !(ct->ct_flags & CT_POINTER) || - (ct->ct_itemdescr->ct_size <= 0)) { + (itemsize <= 0)) { PyErr_Format(PyExc_TypeError, "cannot subtract cdata '%s' and cdata '%s'", cdv->c_type->ct_name, ct->ct_name); return NULL; } - diff = (cdv->c_data - cdw->c_data) / ct->ct_itemdescr->ct_size; + diff = (cdv->c_data - cdw->c_data) / itemsize; #if PY_MAJOR_VERSION < 3 return PyInt_FromSsize_t(diff); #else diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1642,9 +1642,6 @@ def test_void_errors(): py.test.raises(ValueError, alignof, new_void_type()) py.test.raises(TypeError, newp, new_pointer_type(new_void_type()), None) - x = 
cast(new_pointer_type(new_void_type()), 42) - py.test.raises(TypeError, "x + 1") - py.test.raises(TypeError, "x - 1") def test_too_many_items(): BChar = new_primitive_type("char") @@ -3108,6 +3105,18 @@ py.test.raises(TypeError, "p[1:5] = b'XYZT'") py.test.raises(TypeError, "p[1:5] = [1, 2, 3, 4]") +def test_void_p_arithmetic(): + BVoid = new_void_type() + BInt = new_primitive_type("intptr_t") + p = cast(new_pointer_type(BVoid), 100000) + assert int(cast(BInt, p)) == 100000 + assert int(cast(BInt, p + 42)) == 100042 + assert int(cast(BInt, p - (-42))) == 100042 + assert (p + 42) - p == 42 + q = cast(new_pointer_type(new_primitive_type("char")), 100000) + py.test.raises(TypeError, "p - q") + py.test.raises(TypeError, "q - p") + def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Sat Nov 9 09:18:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 09:18:00 +0100 (CET) Subject: [pypy-commit] cffi default: Support "number + pointer" too, like in C Message-ID: <20131109081800.9DFA51C1473@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1396:2e0764ba0a2a Date: 2013-11-09 09:17 +0100 http://bitbucket.org/cffi/cffi/changeset/2e0764ba0a2a/ Log: Support "number + pointer" too, like in C diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2054,8 +2054,15 @@ CDataObject *cd; CTypeDescrObject *ctptr; - if (!CData_Check(v)) - goto not_implemented; + if (!CData_Check(v)) { + PyObject *swap; + assert(CData_Check(w)); + if (sign != 1) + goto not_implemented; + swap = v; + v = w; + w = swap; + } i = PyNumber_AsSsize_t(w, PyExc_OverflowError); if (i == -1 && PyErr_Occurred()) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -553,6 +553,7 @@ assert repr(a) == "" % ( 3*5*size_of_int(),) assert repr(a + 0).startswith(" Author: Armin Rigo Branch: Changeset: r1397:9ba268ca7739 Date: 2013-11-09 09:27 +0100 
http://bitbucket.org/cffi/cffi/changeset/9ba268ca7739/ Log: Fix a segfault diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2112,23 +2112,21 @@ CDataObject *cdv = (CDataObject *)v; CDataObject *cdw = (CDataObject *)w; CTypeDescrObject *ct = cdw->c_type; - Py_ssize_t diff; - Py_ssize_t itemsize; + Py_ssize_t diff, itemsize; if (ct->ct_flags & CT_ARRAY) /* ptr_to_T - array_of_T: ok */ ct = (CTypeDescrObject *)ct->ct_stuff; - itemsize = ct->ct_itemdescr->ct_size; - if (ct->ct_flags & CT_IS_VOID_PTR) - itemsize = 1; - if (ct != cdv->c_type || !(ct->ct_flags & CT_POINTER) || - (itemsize <= 0)) { + (ct->ct_itemdescr->ct_size <= 0 && + !(ct->ct_flags & CT_IS_VOID_PTR))) { PyErr_Format(PyExc_TypeError, "cannot subtract cdata '%s' and cdata '%s'", cdv->c_type->ct_name, ct->ct_name); return NULL; } + itemsize = ct->ct_itemdescr->ct_size; + if (itemsize <= 0) itemsize = 1; diff = (cdv->c_data - cdw->c_data) / itemsize; #if PY_MAJOR_VERSION < 3 return PyInt_FromSsize_t(diff); diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3117,6 +3117,8 @@ q = cast(new_pointer_type(new_primitive_type("char")), 100000) py.test.raises(TypeError, "p - q") py.test.raises(TypeError, "q - p") + py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") + py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") def test_version(): From noreply at buildbot.pypy.org Sat Nov 9 10:34:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 10:34:45 +0100 (CET) Subject: [pypy-commit] pypy default: Add the same fast path as in CPython Message-ID: <20131109093445.5BAE61C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67888:20b038f7ded3 Date: 2013-11-09 09:45 +0100 http://bitbucket.org/pypy/pypy/changeset/20b038f7ded3/ Log: Add the same fast path as in CPython diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- 
a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -19,9 +19,9 @@ _cdata = lltype.nullptr(rffi.CCHARP.TO) def __init__(self, space, cdata, ctype): - from pypy.module._cffi_backend import ctypeprim + from pypy.module._cffi_backend import ctypeobj assert lltype.typeOf(cdata) == rffi.CCHARP - assert isinstance(ctype, ctypeprim.W_CType) + assert isinstance(ctype, ctypeobj.W_CType) self.space = space self._cdata = cdata # don't forget keepalive_until_here! self.ctype = ctype @@ -211,7 +211,21 @@ keepalive_until_here(w_value) return # + # A fast path for [0:N] = "somestring". + from pypy.module._cffi_backend import ctypeprim space = self.space + if (space.isinstance_w(w_value, space.w_str) and + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + value = space.str_w(w_value) + if len(value) != length: + raise operationerrfmt(space.w_ValueError, + "need a string of length %d, got %d", + length, len(value)) + copy_string_to_raw(llstr(value), cdata, 0, length) + return + # w_iter = space.iter(w_value) for i in range(length): try: From noreply at buildbot.pypy.org Sat Nov 9 10:34:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 10:34:44 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/9ba268ca7739. In-progress. Message-ID: <20131109093444.2D1441C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67887:d2503dcb595a Date: 2013-11-09 09:34 +0100 http://bitbucket.org/pypy/pypy/changeset/d2503dcb595a/ Log: Update to cffi/9ba268ca7739. In-progress. 
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -245,19 +245,22 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray + from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr # if (ct is not self.ctype or not isinstance(ct, ctypeptr.W_CTypePointer) or - ct.ctitem.size <= 0): + (ct.ctitem.size <= 0 and not ct.is_void_ptr)): raise operationerrfmt(space.w_TypeError, "cannot subtract cdata '%s' and cdata '%s'", self.ctype.name, ct.name) # + itemsize = ct.ctitem.size + if itemsize <= 0: itemsize = 1 diff = (rffi.cast(lltype.Signed, self._cdata) - - rffi.cast(lltype.Signed, w_other._cdata)) // ct.ctitem.size + rffi.cast(lltype.Signed, w_other._cdata)) // itemsize return space.wrap(diff) # return self._add_or_sub(w_other, -1) @@ -441,6 +444,7 @@ __getitem__ = interp2app(W_CData.getitem), __setitem__ = interp2app(W_CData.setitem), __add__ = interp2app(W_CData.add), + __radd__ = interp2app(W_CData.add), __sub__ = interp2app(W_CData.sub), __getattr__ = interp2app(W_CData.getattr), __setattr__ = interp2app(W_CData.setattr), diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -236,11 +236,15 @@ def add(self, cdata, i): space = self.space ctitem = self.ctitem + itemsize = ctitem.size if ctitem.size < 0: - raise operationerrfmt(space.w_TypeError, + if self.is_void_ptr: + itemsize = 1 + else: + raise operationerrfmt(space.w_TypeError, "ctype '%s' points to items of unknown size", self.name) - p = rffi.ptradd(cdata, i * self.ctitem.size) + p = rffi.ptradd(cdata, i * itemsize) return cdataobj.W_CData(space, p, self) def cast(self, w_ob): diff --git 
a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -542,6 +542,7 @@ assert repr(a) == "" % ( 3*5*size_of_int(),) assert repr(a + 0).startswith(" Author: Armin Rigo Branch: Changeset: r67889:73de8b43bff0 Date: 2013-11-09 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/73de8b43bff0/ Log: Port cffi's c99-array branch. diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -34,19 +34,8 @@ datasize = self.size # if datasize < 0: - if (space.isinstance_w(w_init, space.w_list) or - space.isinstance_w(w_init, space.w_tuple)): - length = space.int_w(space.len(w_init)) - elif space.isinstance_w(w_init, space.w_basestring): - # from a string, we add the null terminator - length = space.int_w(space.len(w_init)) + 1 - else: - length = space.getindex_w(w_init, space.w_OverflowError) - if length < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative array length")) - w_init = space.w_None - # + from pypy.module._cffi_backend import misc + w_init, length = misc.get_new_array_length(space, w_init) try: datasize = ovfcheck(length * self.ctitem.size) except OverflowError: diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -15,15 +15,12 @@ class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] + _attrs_ = ['ctitem', 'can_cast_anything', 'length'] + _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length'] length = -1 def __init__(self, space, size, extra, extra_position, ctitem, 
could_cast_anything=True): - from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion name, name_position = ctitem.insert_name(extra, extra_position) W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -32,7 +29,6 @@ # - for functions, it is the return type self.ctitem = ctitem self.can_cast_anything = could_cast_anything and ctitem.cast_anything - self.is_struct_ptr = isinstance(ctitem, W_CTypeStructOrUnion) def is_char_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar) @@ -195,6 +191,7 @@ W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init): + from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctitem = self.ctitem datasize = ctitem.size @@ -202,10 +199,15 @@ raise operationerrfmt(space.w_TypeError, "cannot instantiate ctype '%s' of unknown size", self.name) - if self.is_struct_ptr: + if isinstance(ctitem, W_CTypeStructOrUnion): # 'newp' on a struct-or-union pointer: in this case, we return # a W_CDataPtrToStruct object which has a strong reference # to a W_CDataNewOwning that really contains the structure. 
+ # + if ctitem.with_var_array and not space.is_w(w_init, space.w_None): + datasize = ctitem.convert_struct_from_object( + lltype.nullptr(rffi.CCHARP.TO), w_init, datasize) + # cdatastruct = cdataobj.W_CDataNewOwning(space, datasize, ctitem) cdata = cdataobj.W_CDataPtrToStructOrUnion(space, cdatastruct._cdata, @@ -321,7 +323,8 @@ space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)): + (isinstance(ctype2, W_CTypePtrOrArray) and + isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -9,7 +9,8 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, intmask -from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._cffi_backend import cdataobj, ctypeprim, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -17,12 +18,13 @@ class W_CTypeStructOrUnion(W_CType): _immutable_fields_ = ['alignment?', 'fields_list?', 'fields_dict?', - 'custom_field_pos?'] + 'custom_field_pos?', 'with_var_array?'] # fields added by complete_struct_or_union(): alignment = -1 fields_list = None fields_dict = None custom_field_pos = False + with_var_array = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) @@ -90,12 +92,13 @@ pass def convert_from_object(self, cdata, w_ob): - space = self.space - if self._copy_from_same(cdata, w_ob): - return + if not self._copy_from_same(cdata, w_ob): + self.convert_struct_from_object(cdata, w_ob) + def 
convert_struct_from_object(self, cdata, w_ob, optvarsize=-1): self._check_only_one_argument_for_union(w_ob) + space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) @@ -104,7 +107,9 @@ "too many initializers for '%s' (got %d)", self.name, len(lst_w)) for i in range(len(lst_w)): - self.fields_list[i].write(cdata, lst_w[i]) + optvarsize = self.fields_list[i].write_v(cdata, lst_w[i], + optvarsize) + return optvarsize elif space.isinstance_w(w_ob, space.w_dict): lst_w = space.fixedview(w_ob) @@ -116,11 +121,16 @@ except KeyError: space.raise_key_error(w_key) assert 0 - cf.write(cdata, space.getitem(w_ob, w_key)) + optvarsize = cf.write_v(cdata, space.getitem(w_ob, w_key), + optvarsize) + return optvarsize else: - raise self._convert_error("list or tuple or dict or struct-cdata", - w_ob) + if optvarsize == -1: + msg = "list or tuple or dict or struct-cdata" + else: + msg = "list or tuple or dict" + raise self._convert_error(msg, w_ob) @jit.elidable def _getcfield_const(self, attr): @@ -192,6 +202,37 @@ else: self.ctype.convert_from_object(cdata, w_ob) + def write_v(self, cdata, w_ob, optvarsize): + # a special case for var-sized C99 arrays + from pypy.module._cffi_backend import ctypearray + ct = self.ctype + if isinstance(ct, ctypearray.W_CTypeArray) and ct.length < 0: + space = ct.space + w_ob, varsizelength = misc.get_new_array_length(space, w_ob) + if optvarsize != -1: + # in this mode, the only purpose of this function is to compute + # the real size of the structure from a var-sized C99 array + assert cdata == lltype.nullptr(rffi.CCHARP.TO) + itemsize = ct.ctitem.size + try: + varsize = ovfcheck(itemsize * varsizelength) + size = ovfcheck(self.offset + varsize) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array size would overflow a ssize_t")) + assert size >= 0 + return max(size, optvarsize) + # if 'value' was only an integer, 
get_new_array_length() returns + # w_ob = space.w_None. Detect if this was the case, + # and if so, stop here, leaving the content uninitialized + # (it should be zero-initialized from somewhere else). + if space.is_w(w_ob, space.w_None): + return optvarsize + # + if optvarsize == -1: + self.write(cdata, w_ob) + return optvarsize + def convert_bitfield_to_object(self, cdata): ctype = self.ctype space = ctype.space diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -278,6 +278,22 @@ # ____________________________________________________________ +def get_new_array_length(space, w_value): + if (space.isinstance_w(w_value, space.w_list) or + space.isinstance_w(w_value, space.w_tuple)): + return (w_value, space.int_w(space.len(w_value))) + elif space.isinstance_w(w_value, space.w_basestring): + # from a string, we add the null terminator + return (w_value, space.int_w(space.len(w_value)) + 1) + else: + explicitlength = space.getindex_w(w_value, space.w_OverflowError) + if explicitlength < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative array length")) + return (space.w_None, explicitlength) + +# ____________________________________________________________ + @specialize.arg(0) def _raw_memcopy_tp(TPP, source, dest): # in its own function: LONGLONG may make the whole function jit-opaque diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -158,8 +158,10 @@ fields_list = [] fields_dict = {} custom_field_pos = False + with_var_array = False - for w_field in fields_w: + for i in range(len(fields_w)): + w_field = fields_w[i] field_w = space.fixedview(w_field) if not (2 <= len(field_w) <= 4): raise OperationError(space.w_TypeError, @@ -176,7 +178,11 @@ "duplicate field name '%s'", fname) # if ftype.size < 0: - raise 
operationerrfmt(space.w_TypeError, + if (isinstance(ftype, ctypearray.W_CTypeArray) and fbitsize < 0 + and (i == len(fields_w) - 1 or foffset != -1)): + with_var_array = True + else: + raise operationerrfmt(space.w_TypeError, "field '%s.%s' has ctype '%s' of unknown size", w_ctype.name, fname, ftype.name) # @@ -235,7 +241,8 @@ fields_list.append(fld) fields_dict[fname] = fld - boffset += ftype.size * 8 + if ftype.size >= 0: + boffset += ftype.size * 8 prev_bitfield_size = 0 else: @@ -359,6 +366,7 @@ w_ctype.fields_list = fields_list w_ctype.fields_dict = fields_dict w_ctype.custom_field_pos = custom_field_pos + w_ctype.with_var_array = with_var_array # ____________________________________________________________ From noreply at buildbot.pypy.org Sat Nov 9 10:34:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 10:34:47 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131109093447.D18421C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67890:552cf038d95c Date: 2013-11-09 10:34 +0100 http://bitbucket.org/pypy/pypy/changeset/552cf038d95c/ Log: merge heads diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -410,7 +410,6 @@ def descr_getslice(self, space, w_i, w_j): return space.getitem(self, space.newslice(w_i, w_j, space.w_None)) - def descr_setitem(self, space, w_idx, w_item): "x.__setitem__(i, y) <==> x[i]=y" if space.isinstance_w(w_idx, space.w_slice): @@ -913,10 +912,18 @@ return space.w_NotImplemented a = mytype.w_class(space) a.setlen(self.len + w_other.len, overallocate=False) - for i in range(self.len): - a.buffer[i] = self.buffer[i] - for i in range(w_other.len): - a.buffer[i + self.len] = w_other.buffer[i] + if self.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, a.buffer), + rffi.cast(rffi.VOIDP, self.buffer), + self.len * mytype.bytes + ) + if w_other.len: + rffi.c_memcpy( 
+ rffi.cast(rffi.VOIDP, rffi.ptradd(a.buffer, self.len)), + rffi.cast(rffi.VOIDP, w_other.buffer), + w_other.len * mytype.bytes + ) return a def descr_inplace_add(self, space, w_other): @@ -925,8 +932,12 @@ oldlen = self.len otherlen = w_other.len self.setlen(oldlen + otherlen) - for i in range(otherlen): - self.buffer[oldlen + i] = w_other.buffer[i] + if otherlen: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(self.buffer, oldlen)), + rffi.cast(rffi.VOIDP, w_other.buffer), + otherlen * mytype.bytes + ) return self def descr_mul(self, space, w_repeat): diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -96,8 +96,13 @@ idx += 1 def _escape(self, box): - if box in self.new_boxes: - self.new_boxes[box] = False + try: + unescaped = self.new_boxes[box] + except KeyError: + pass + else: + if unescaped: + self.new_boxes[box] = False try: deps = self.dependencies.pop(box) except KeyError: From noreply at buildbot.pypy.org Sat Nov 9 11:27:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 11:27:38 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix for issue #1632. Message-ID: <20131109102738.268F61C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67891:a3af3cc958c5 Date: 2013-11-09 11:27 +0100 http://bitbucket.org/pypy/pypy/changeset/a3af3cc958c5/ Log: Test and fix for issue #1632. 
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -304,8 +304,12 @@ except IndexError: return None if hint in self.DONT_MOVE_GPR: - self.ARGUMENTS_GPR[i] = hint - res = hint + for j in range(i): + if hint is self.ARGUMENTS_GPR[j]: + break + else: + self.ARGUMENTS_GPR[i] = hint + res = hint return res def _unused_xmm(self): diff --git a/rpython/jit/backend/x86/test/test_callbuilder.py b/rpython/jit/backend/x86/test/test_callbuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_callbuilder.py @@ -0,0 +1,33 @@ +from rpython.jit.backend.x86 import callbuilder +from rpython.jit.backend.x86.regloc import esi, edi, ebx, ecx, ImmedLoc + + +class FakeAssembler: + mc = None + class _regalloc: + class rm: + free_regs = [ebx] + + def __init__(self): + self._log = [] + + def _is_asmgcc(self): + return False + + def regalloc_mov(self, src, dst): + self._log.append(('mov', src, dst)) + + +def test_base_case(): + asm = FakeAssembler() + cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx]) + cb.prepare_arguments() + assert asm._log == [('mov', ebx, edi), + ('mov', ebx, esi)] + +def test_bug_call_release_gil(): + asm = FakeAssembler() + cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx]) + cb.select_call_release_gil_mode() + cb.prepare_arguments() + assert asm._log == [('mov', ebx, ecx)] From noreply at buildbot.pypy.org Sat Nov 9 14:41:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:41:17 +0100 (CET) Subject: [pypy-commit] cffi thread-safe: Trying to make cffi thread-safe by adding a few locks Message-ID: <20131109134117.D16691C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: thread-safe Changeset: r1398:17a65544a8e7 Date: 2013-11-09 13:31 +0100 http://bitbucket.org/cffi/cffi/changeset/17a65544a8e7/ Log: Trying to make cffi thread-safe by adding a few 
locks From noreply at buildbot.pypy.org Sat Nov 9 14:41:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:41:18 +0100 (CET) Subject: [pypy-commit] cffi default: Clean up. (May also fix an obscure bug...) Message-ID: <20131109134118.E03D81C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1399:e2dd1a405fda Date: 2013-11-09 14:00 +0100 http://bitbucket.org/cffi/cffi/changeset/e2dd1a405fda/ Log: Clean up. (May also fix an obscure bug...) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -117,18 +117,23 @@ def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + btype, really_a_function_type = self._parsed_types[cdecl] except KeyError: key = cdecl if not isinstance(cdecl, str): # unicode, on Python 2 cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) + type = self._parser.parse_type(cdecl) + if hasattr(type, 'as_function_pointer'): + really_a_function_type = True + type = type.as_function_pointer() + else: + really_a_function_type = False btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + self._parsed_types[key] = btype, really_a_function_type + # + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -222,14 +222,13 @@ else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if 
isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,13 +350,19 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - 
convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): return 'const' in typenode.quals From noreply at buildbot.pypy.org Sat Nov 9 14:41:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:41:19 +0100 (CET) Subject: [pypy-commit] cffi thread-safe: Add the basic file exporting locks Message-ID: <20131109134119.DB0AA1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: thread-safe Changeset: r1400:3a2e2d0033c2 Date: 2013-11-09 14:01 +0100 http://bitbucket.org/cffi/cffi/changeset/3a2e2d0033c2/ Log: Add the basic file exporting locks diff --git a/cffi/lock.py b/cffi/lock.py new file mode 100644 --- /dev/null +++ b/cffi/lock.py @@ -0,0 +1,12 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock From noreply at buildbot.pypy.org Sat Nov 9 14:41:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:41:20 +0100 (CET) Subject: [pypy-commit] cffi thread-safe: hg merge default Message-ID: <20131109134120.D73721C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: thread-safe Changeset: r1401:d13bbbfc83fc Date: 2013-11-09 14:01 +0100 http://bitbucket.org/cffi/cffi/changeset/d13bbbfc83fc/ Log: hg merge default diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -117,18 +117,23 @@ 
def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + btype, really_a_function_type = self._parsed_types[cdecl] except KeyError: key = cdecl if not isinstance(cdecl, str): # unicode, on Python 2 cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) + type = self._parser.parse_type(cdecl) + if hasattr(type, 'as_function_pointer'): + really_a_function_type = True + type = type.as_function_pointer() + else: + really_a_function_type = False btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + self._parsed_types[key] = btype, really_a_function_type + # + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -222,14 +222,13 @@ else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, 
partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,13 +350,19 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): return 'const' in typenode.quals From 
noreply at buildbot.pypy.org Sat Nov 9 14:41:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:41:21 +0100 (CET) Subject: [pypy-commit] cffi thread-safe: Add locking. Not really tested, apart from the absence of double locking. Message-ID: <20131109134121.DD9CF1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: thread-safe Changeset: r1402:0581b30c5944 Date: 2013-11-09 14:40 +0100 http://bitbucket.org/cffi/cffi/changeset/0581b30c5944/ Log: Add locking. Not really tested, apart from the absence of double locking. diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,28 +113,41 @@ library we only look for the actual (untyped) symbols. 
""" assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + if hasattr(type, 'as_function_pointer'): + really_a_function_type = True + type = type.as_function_pointer() + else: + really_a_function_type = False + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, really_a_function_type = self._parsed_types[cdecl] + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - type = self._parser.parse_type(cdecl) - if hasattr(type, 'as_function_pointer'): - really_a_function_type = True - type = type.as_function_pointer() - else: - really_a_function_type = False - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, really_a_function_type + with self._lock: + result = self._typeof_locked(cdecl) # + btype, really_a_function_type = result if really_a_function_type and not consider_function_as_funcptr: raise CDefError("the type %r is a function type, not a " "pointer-to-function type" % (cdecl,)) @@ -151,7 +168,8 @@ return res if (isinstance(cdecl, types.FunctionType) and hasattr(cdecl, '_cffi_base_type')): - return self._get_cached_btype(cdecl._cffi_base_type) + with self._lock: + return 
self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -288,14 +306,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -330,7 +351,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -350,10 +372,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -380,7 +404,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -412,11 +436,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -452,4 +482,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/cffi/gc_weakref.py b/cffi/gc_weakref.py --- a/cffi/gc_weakref.py +++ b/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/cffi/lock.py b/cffi/lock.py --- a/cffi/lock.py +++ b/cffi/lock.py @@ -10,3 +10,21 @@ from _thread import 
allocate_lock except ImportError: from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -386,15 +386,16 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: value += (1 << (8*self.ffi.sizeof("long long"))) else: - BFunc = self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -438,10 +439,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 
256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -493,7 +495,7 @@ # sense that "a=..." is forbidden if tp.length == '...': funcname = '_cffi_sizeof_%s' % (name,) - BFunc = self.ffi.typeof('size_t(*)(void)') + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] function = module.load_function(BFunc, funcname) size = function() BItemType = self.ffi._get_cached_btype(tp.item) @@ -516,7 +518,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. 
If necessary, the C code is written and compiled first. """ - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -130,8 +130,9 @@ ffi.cdef(""" typedef int (*fn_t)(int[5]); """) - type = ffi._parser.parse_type("fn_t") - BType = ffi._get_cached_btype(type) + with ffi._lock: + type = ffi._parser.parse_type("fn_t") + BType = ffi._get_cached_btype(type) assert str(BType) == '>), , False>' def test_remove_comments(): From noreply at buildbot.pypy.org Sat Nov 9 14:49:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:49:54 +0100 (CET) Subject: [pypy-commit] cffi default: Update the version number Message-ID: <20131109134954.C45181C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1403:adea54105fc3 Date: 2013-11-09 14:49 +0100 http://bitbucket.org/cffi/cffi/changeset/adea54105fc3/ Log: Update the version number diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5445,7 +5445,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.7"); + v = PyText_FromString("0.8"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git 
a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3123,4 +3123,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.7" + assert __version__ == "0.8" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '0.7' +version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.7.2' +release = '0.8' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,7 +88,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.2.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.tar.gz - Or grab the most current version by following the instructions below. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -102,7 +102,7 @@ `Mailing list `_ """, - version='0.7.2', + version='0.8', packages=['cffi'], zip_safe=False, From noreply at buildbot.pypy.org Sat Nov 9 14:56:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 14:56:30 +0100 (CET) Subject: [pypy-commit] pypy default: Update the version number of CFFI Message-ID: <20131109135630.C16BC1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67892:6d173d04dc88 Date: 2013-11-09 14:55 +0100 http://bitbucket.org/pypy/pypy/changeset/6d173d04dc88/ Log: Update the version number of CFFI diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -7,7 +7,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.7")', + '__version__': 'space.wrap("0.8")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3112,4 +3112,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.7" + assert __version__ == "0.8" From noreply at buildbot.pypy.org Sat Nov 9 16:55:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 16:55:12 +0100 (CET) Subject: [pypy-commit] cffi default: Merge the 'thread-safe' branch: adds locks in a few hopeful places. Message-ID: <20131109155512.56D581C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1404:d852277c4508 Date: 2013-11-09 16:54 +0100 http://bitbucket.org/cffi/cffi/changeset/d852277c4508/ Log: Merge the 'thread-safe' branch: adds locks in a few hopeful places. 
diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,28 +113,41 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! 
+ key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + if hasattr(type, 'as_function_pointer'): + really_a_function_type = True + type = type.as_function_pointer() + else: + really_a_function_type = False + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, really_a_function_type = self._parsed_types[cdecl] + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - type = self._parser.parse_type(cdecl) - if hasattr(type, 'as_function_pointer'): - really_a_function_type = True - type = type.as_function_pointer() - else: - really_a_function_type = False - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, really_a_function_type + with self._lock: + result = self._typeof_locked(cdecl) # + btype, really_a_function_type = result if really_a_function_type and not consider_function_as_funcptr: raise CDefError("the type %r is a function type, not a " "pointer-to-function type" % (cdecl,)) @@ -151,7 +168,8 @@ return res if (isinstance(cdecl, types.FunctionType) and hasattr(cdecl, '_cffi_base_type')): - return self._get_cached_btype(cdecl._cffi_base_type) + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -288,14 +306,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -330,7 +351,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -350,10 +372,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. """ - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -380,7 +404,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -412,11 +436,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + 
with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -452,4 +482,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/cffi/gc_weakref.py b/cffi/gc_weakref.py --- a/cffi/gc_weakref.py +++ b/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/cffi/lock.py b/cffi/lock.py new file mode 100644 --- /dev/null +++ b/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, 
layoutfuncname) layout = [] num = 0 @@ -386,15 +386,16 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: value += (1 << (8*self.ffi.sizeof("long long"))) else: - BFunc = self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -438,10 +439,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -493,7 +495,7 @@ # sense that "a=..." is forbidden if tp.length == '...': funcname = '_cffi_sizeof_%s' % (name,) - BFunc = self.ffi.typeof('size_t(*)(void)') + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] function = module.load_function(BFunc, funcname) size = function() BItemType = self.ffi._get_cached_btype(tp.item) @@ -516,7 +518,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -130,8 +130,9 @@ ffi.cdef(""" typedef int (*fn_t)(int[5]); """) - type = ffi._parser.parse_type("fn_t") - BType = ffi._get_cached_btype(type) + with ffi._lock: + type = ffi._parser.parse_type("fn_t") + BType = ffi._get_cached_btype(type) assert str(BType) == '>), , False>' def test_remove_comments(): From noreply at buildbot.pypy.org Sat Nov 9 17:01:56 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:01:56 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: This is done now Message-ID: <20131109160156.40BF01C0144@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r5107:08a9f0e51d2f Date: 2013-11-09 08:01 -0800 http://bitbucket.org/pypy/extradoc/changeset/08a9f0e51d2f/ Log: This is done now diff --git a/planning/gc.txt b/planning/gc.txt deleted file mode 100644 --- a/planning/gc.txt +++ /dev/null @@ -1,4 +0,0 @@ -The gc should support "lightweight destructors" that just do -a raw_free(). It should also help because we can then account -the size of the raw memory attached to objects in this way. -See e.g. 
http://codespeak.net/pipermail/pypy-dev/2010q4/006648.html From noreply at buildbot.pypy.org Sat Nov 9 17:03:06 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:03:06 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: This was also done Message-ID: <20131109160306.1D7991C0144@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r5108:5acdadcc32fd Date: 2013-11-09 08:02 -0800 http://bitbucket.org/pypy/extradoc/changeset/5acdadcc32fd/ Log: This was also done diff --git a/planning/primitive-identity.rst b/planning/primitive-identity.rst deleted file mode 100644 --- a/planning/primitive-identity.rst +++ /dev/null @@ -1,70 +0,0 @@ -Primitive objects and object identity -===================================== - -Hi all, - -Recently PyPy merged a pretty big branch that allows for transparently -type-specializing dictionaries. That means if you write something like:: - - strs = {x: str(x) for x in xrange(100000)} - -The dictionary would be specialized for integer keys, obviating the need to -allocate W_IntObjects (PyPy's equivalent to PyIntObject). - -This, however introduces interesting behavior surrounding object identity -(*only* with respect to primitive objects, none of what is discussed affects -either mutable, or user-defined objects), specifically the follow code no -longer works:: - - x = int(x) - z = {x: None} - assert next(iter(z)) is x - -This would similiarly fail if you replaced is with comparing the id()s. The -question now is, is this behavior a violation of the Python language -definition, or is it a legal, interpreter defined difference? - -There are several arguments in favor: - -1) It is easier to implement this way, and removes complexity in the - interpreter implementation, and allows for better performance. - -2) For all of these objects, identity is trivial. That is to say identity could - always be replacement by an equality test and no semantics would be violated. 
- In that respect requiring that identity be maintained adds no value, the - new object is completely indistinguishable - -3) A reliance on object identity leads to some rather strange behavior, a good - example of this is a recent discussion about the identity shortcut in - ``dict.__contains__`` and ``list.__contains__``, specifically in the case of - ``nan``. At present if you have a ``dict`` with a ``nan`` key the only way - to retreive that value is to use the exact same ``nan`` object, another one - will not do because ``nan`` does not have reflexive identity. Even on - CPython, passing around this object could easily lose it's identity, for - example various functions in the ``math`` module return a ``nan`` given a - ``nan`` argument, but they make no guarntee that they return the same - instance, furthering a reliance on any such behavior is wrong, given that - equality is always a valid substitute. - -And arguments against:: - -1) It may break some existing code (so far the only such code I've found is in - the Python test suite, it was not testing this behavior directly, but rather - incidentally relied on it). - - -I can find no other argument against it. - -Note that should we decide that this is in fact a violation of the language -spec, the resulting behavior in PyPy will be for identity to be equality on -primitive type objects. That is to say, the following code would work:: - - assert all([ - x is (x + 1 - 1) - for x in xrange(sys.minint, sys.maxint) - ]) - -As actually assigning allocating W_IntObjects will not occur. 
- -Opinions welcome, -Alex From noreply at buildbot.pypy.org Sat Nov 9 17:24:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:32 +0100 (CET) Subject: [pypy-commit] pypy default: os.setgroups() Message-ID: <20131109162432.BD2191C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67893:6c6a6202227d Date: 2013-11-09 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6c6a6202227d/ Log: os.setgroups() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -155,7 +155,8 @@ for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', + 'setgroups']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -987,7 +987,26 @@ Return list of supplemental group IDs for the process. """ - return space.newlist([space.wrap(e) for e in os.getgroups()]) + try: + list = os.getgroups() + except OSError, e: + raise wrap_oserror(space, e) + return space.newlist([space.wrap(e) for e in list]) + +def setgroups(space, w_list): + """ setgroups(list) + + Set the groups of the current process to list. 
+ """ + list = [] + for w_gid in space.unpackiterable(w_list): + gid = space.int_w(w_gid) + check_uid_range(space, gid) + list.append(gid) + try: + os.setgroups(list[:]) + except OSError, e: + raise wrap_oserror(space, e) def getpgrp(space): """ getpgrp() -> pgrp diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -615,6 +615,13 @@ def test_os_getgroups(self): os = self.posix assert os.getgroups() == self.getgroups + def test_os_setgroups(self): + os = self.posix + raises(TypeError, os.setgroups, [2, 5, "hello"]) + try: + os.setgroups(os.getgroups()) + except OSError: + pass if hasattr(os, 'getpgid'): def test_os_getpgid(self): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -712,6 +712,26 @@ return extdef([], [self.GID_T], llimpl=getgroups_llimpl, export_name="ll_os.ll_getgroups") + @registering_if(os, 'setgroups') + def register_os_setgroups(self): + GP = rffi.CArrayPtr(self.GID_T) + c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT) + + def setgroups_llimpl(list): + n = len(list) + groups = lltype.malloc(GP.TO, n, flavor='raw') + try: + for i in range(n): + groups[i] = rffi.cast(self.GID_T, list[i]) + n = c_setgroups(rffi.cast(rffi.SIZE_T, n), groups) + finally: + lltype.free(groups, flavor='raw') + if n != 0: + raise OSError(rposix.get_errno(), "os_setgroups failed") + + return extdef([[self.GID_T]], None, llimpl=setgroups_llimpl, + export_name="ll_os.ll_setgroups") + @registering_if(os, 'getpgrp') def register_os_getpgrp(self): name = 'getpgrp' diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -205,3 +205,11 @@ return os.getgroups() ll_a = self.interpret(f, []) 
assert self.ll_to_list(ll_a) == f() + + def test_setgroups(self): + def f(): + try: + os.setgroups(os.getgroups()) + except OSError: + pass + self.interpret(f, []) From noreply at buildbot.pypy.org Sat Nov 9 17:24:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:34 +0100 (CET) Subject: [pypy-commit] pypy default: os.initgroups() Message-ID: <20131109162434.052701C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67894:150c1dc68223 Date: 2013-11-09 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/150c1dc68223/ Log: os.initgroups() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -156,7 +156,7 @@ 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', - 'setgroups']: + 'setgroups', 'initgroups']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1008,6 +1008,19 @@ except OSError, e: raise wrap_oserror(space, e) + at unwrap_spec(username=str, gid=c_gid_t) +def initgroups(space, username, gid): + """ initgroups(username, gid) -> None + + Call the system initgroups() to initialize the group access list with all of + the groups of which the specified username is a member, plus the specified + group id. 
+ """ + try: + os.initgroups(username, gid) + except OSError, e: + raise wrap_oserror(space, e) + def getpgrp(space): """ getpgrp() -> pgrp diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -615,6 +615,8 @@ def test_os_getgroups(self): os = self.posix assert os.getgroups() == self.getgroups + + if hasattr(os, 'setgroups'): def test_os_setgroups(self): os = self.posix raises(TypeError, os.setgroups, [2, 5, "hello"]) @@ -623,6 +625,11 @@ except OSError: pass + if hasattr(os, 'initgroups'): + def test_os_initgroups(self): + os = self.posix + raises(OSError, os.initgroups, "crW2hTQC", 100) + if hasattr(os, 'getpgid'): def test_os_getpgid(self): os = self.posix diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -702,14 +702,15 @@ groups = lltype.malloc(GP.TO, n, flavor='raw') try: n = c_getgroups(n, groups) - result = [groups[i] for i in range(n)] + result = [rffi.cast(lltype.Signed, groups[i]) + for i in range(n)] finally: lltype.free(groups, flavor='raw') if n >= 0: return result raise OSError(rposix.get_errno(), "os_getgroups failed") - return extdef([], [self.GID_T], llimpl=getgroups_llimpl, + return extdef([], [int], llimpl=getgroups_llimpl, export_name="ll_os.ll_getgroups") @registering_if(os, 'setgroups') @@ -729,9 +730,22 @@ if n != 0: raise OSError(rposix.get_errno(), "os_setgroups failed") - return extdef([[self.GID_T]], None, llimpl=setgroups_llimpl, + return extdef([[int]], None, llimpl=setgroups_llimpl, export_name="ll_os.ll_setgroups") + @registering_if(os, 'initgroups') + def register_os_initgroups(self): + c_initgroups = self.llexternal('initgroups', + [rffi.CCHARP, self.GID_T], rffi.INT) + + def initgroups_llimpl(user, group): + n = c_initgroups(user, rffi.cast(self.GID_T, group)) + if n != 0: + raise 
OSError(rposix.get_errno(), "os_initgroups failed") + + return extdef([str, int], None, llimpl=initgroups_llimpl, + export_name="ll_os.ll_initgroups") + @registering_if(os, 'getpgrp') def register_os_getpgrp(self): name = 'getpgrp' diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -206,6 +206,7 @@ ll_a = self.interpret(f, []) assert self.ll_to_list(ll_a) == f() + if hasattr(os, 'setgroups'): def test_setgroups(self): def f(): try: @@ -213,3 +214,14 @@ except OSError: pass self.interpret(f, []) + + if hasattr(os, 'initgroups'): + def test_initgroups(self): + def f(): + try: + os.initgroups('sUJJeumz', 4321) + except OSError: + return 1 + return 0 + res = self.interpret(f, []) + assert res == 1 From noreply at buildbot.pypy.org Sat Nov 9 17:24:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:35 +0100 (CET) Subject: [pypy-commit] pypy default: os.tcgetpgrp(), os.tcsetpgrp() Message-ID: <20131109162435.38CEB1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67895:d1bb8acdd365 Date: 2013-11-09 15:42 +0100 http://bitbucket.org/pypy/pypy/changeset/d1bb8acdd365/ Log: os.tcgetpgrp(), os.tcsetpgrp() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -156,7 +156,7 @@ 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', - 'setgroups', 'initgroups']: + 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ 
b/pypy/module/posix/interp_posix.py @@ -1121,6 +1121,29 @@ raise wrap_oserror(space, e) return space.w_None + at unwrap_spec(fd=c_int) +def tcgetpgrp(space, fd): + """ tcgetpgrp(fd) -> pgid + + Return the process group associated with the terminal given by a fd. + """ + try: + pgid = os.tcgetpgrp(fd) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(pgid) + + at unwrap_spec(fd=c_int, pgid=c_gid_t) +def tcsetpgrp(space, fd, pgid): + """ tcsetpgrp(fd, pgid) + + Set the process group associated with the terminal given by a fd. + """ + try: + os.tcsetpgrp(fd, pgid) + except OSError, e: + raise wrap_oserror(space, e) + def declare_new_w_star(name): if name in RegisterOs.w_star_returning_int: @unwrap_spec(status=c_int) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -630,6 +630,16 @@ os = self.posix raises(OSError, os.initgroups, "crW2hTQC", 100) + if hasattr(os, 'tcgetpgrp'): + def test_os_tcgetpgrp(self): + os = self.posix + raises(OSError, os.tcgetpgrp, 9999) + + if hasattr(os, 'tcsetpgrp'): + def test_os_tcsetpgrp(self): + os = self.posix + raises(OSError, os.tcsetpgrp, 9999, 1) + if hasattr(os, 'getpgid'): def test_os_getpgid(self): os = self.posix diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -129,10 +129,6 @@ ('tms_cutime', rffi.INT), ('tms_cstime', rffi.INT)]) - GID_T = platform.SimpleType('gid_t', rffi.INT) - #TODO right now is used only in getgroups, may need to update other - #functions like setgid - # For now we require off_t to be the same size as LONGLONG, which is the # interface required by callers of functions that thake an argument of type # off_t @@ -693,7 +689,7 @@ @registering_if(os, 'getgroups') def register_os_getgroups(self): - GP = rffi.CArrayPtr(self.GID_T) + GP = 
rffi.CArrayPtr(rffi.PID_T) c_getgroups = self.llexternal('getgroups', [rffi.INT, GP], rffi.INT) def getgroups_llimpl(): @@ -715,7 +711,7 @@ @registering_if(os, 'setgroups') def register_os_setgroups(self): - GP = rffi.CArrayPtr(self.GID_T) + GP = rffi.CArrayPtr(rffi.PID_T) c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT) def setgroups_llimpl(list): @@ -723,7 +719,7 @@ groups = lltype.malloc(GP.TO, n, flavor='raw') try: for i in range(n): - groups[i] = rffi.cast(self.GID_T, list[i]) + groups[i] = rffi.cast(rffi.PID_T, list[i]) n = c_setgroups(rffi.cast(rffi.SIZE_T, n), groups) finally: lltype.free(groups, flavor='raw') @@ -736,10 +732,10 @@ @registering_if(os, 'initgroups') def register_os_initgroups(self): c_initgroups = self.llexternal('initgroups', - [rffi.CCHARP, self.GID_T], rffi.INT) + [rffi.CCHARP, rffi.PID_T], rffi.INT) def initgroups_llimpl(user, group): - n = c_initgroups(user, rffi.cast(self.GID_T, group)) + n = c_initgroups(user, rffi.cast(rffi.PID_T, group)) if n != 0: raise OSError(rposix.get_errno(), "os_initgroups failed") @@ -781,6 +777,35 @@ else: return self.extdef_for_os_function_accepting_0int(name) + @registering_if(os, 'tcgetpgrp') + def register_os_tcgetpgrp(self): + c_tcgetpgrp = self.llexternal('tcgetpgrp', [rffi.INT], rffi.PID_T) + + def c_tcgetpgrp_llimpl(fd): + res = c_tcgetpgrp(rffi.cast(rffi.INT, fd)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "tcgetpgrp failed") + return res + + return extdef([int], int, llimpl=c_tcgetpgrp_llimpl, + export_name='ll_os.ll_os_tcgetpgrp') + + @registering_if(os, 'tcsetpgrp') + def register_os_tcsetpgrp(self): + c_tcsetpgrp = self.llexternal('tcsetpgrp', [rffi.INT, rffi.PID_T], + rffi.INT) + + def c_tcsetpgrp_llimpl(fd, pgrp): + res = c_tcsetpgrp(rffi.cast(rffi.INT, fd), + rffi.cast(rffi.PID_T, pgrp)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "tcsetpgrp failed") + + return extdef([int, 
int], None, llimpl=c_tcsetpgrp_llimpl, + export_name='ll_os.ll_os_tcsetpgrp') + @registering_if(os, 'getppid') def register_os_getppid(self): return self.extdef_for_os_function_returning_int('getppid') diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -225,3 +225,24 @@ return 0 res = self.interpret(f, []) assert res == 1 + + if hasattr(os, 'tcgetpgrp'): + def test_tcgetpgrp(self): + def f(fd): + try: + return os.tcgetpgrp(fd) + except OSError: + return 42 + res = self.interpret(f, [9999]) + assert res == 42 + + if hasattr(os, 'tcsetpgrp'): + def test_tcsetpgrp(self): + def f(fd, pgrp): + try: + os.tcsetpgrp(fd, pgrp) + except OSError: + return 1 + return 0 + res = self.interpret(f, [9999, 1]) + assert res == 1 From noreply at buildbot.pypy.org Sat Nov 9 17:24:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:36 +0100 (CET) Subject: [pypy-commit] pypy default: os.getresuid(), os.getresgid() Message-ID: <20131109162436.5E6661C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67896:8913748cd7eb Date: 2013-11-09 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/8913748cd7eb/ Log: os.getresuid(), os.getresgid() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -156,7 +156,8 @@ 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', - 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp']: + 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp', + 'getresuid', 'getresgid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py 
--- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1144,6 +1144,32 @@ except OSError, e: raise wrap_oserror(space, e) +def getresuid(space): + """ getresuid() -> (ruid, euid, suid) + + Get tuple of the current process's real, effective, and saved user ids. + """ + try: + (ruid, euid, suid) = os.getresuid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(ruid), + space.wrap(euid), + space.wrap(suid)]) + +def getresgid(space): + """ getresgid() -> (rgid, egid, sgid) + + Get tuple of the current process's real, effective, and saved group ids. + """ + try: + (rgid, egid, sgid) = os.getresgid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(rgid), + space.wrap(egid), + space.wrap(sgid)]) + def declare_new_w_star(name): if name in RegisterOs.w_star_returning_int: @unwrap_spec(status=c_int) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -658,6 +658,18 @@ assert os.getsid(0) == self.getsid0 raises(OSError, os.getsid, -100000) + if hasattr(os, 'getresuid'): + def test_os_getresuid(self): + os = self.posix + res = os.getresuid() + assert len(res) == 3 + + if hasattr(os, 'getresgid'): + def test_os_getresgid(self): + os = self.posix + res = os.getresgid() + assert len(res) == 3 + if hasattr(os, 'sysconf'): def test_os_sysconf(self): os = self.posix diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -834,6 +834,50 @@ def register_os_setsid(self): return self.extdef_for_os_function_returning_int('setsid') + @registering_if(os, 'getresuid') + def register_os_getresuid(self): + c_getresuid = self.llexternal('getresuid', [rffi.INTP] * 3, rffi.INT) + + def c_getresuid_llimpl(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: 
+ res = c_getresuid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "getresuid failed") + return (rffi.cast(lltype.Signed, out[0]), + rffi.cast(lltype.Signed, out[1]), + rffi.cast(lltype.Signed, out[2])) + finally: + lltype.free(out, flavor='raw') + + return extdef([], (int, int, int), llimpl=c_getresuid_llimpl, + export_name='ll_os.ll_os_getresuid') + + @registering_if(os, 'getresgid') + def register_os_getresgid(self): + c_getresgid = self.llexternal('getresgid', [rffi.INTP] * 3, rffi.INT) + + def c_getresgid_llimpl(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + res = c_getresgid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "getresgid failed") + return (rffi.cast(lltype.Signed, out[0]), + rffi.cast(lltype.Signed, out[1]), + rffi.cast(lltype.Signed, out[2])) + finally: + lltype.free(out, flavor='raw') + + return extdef([], (int, int, int), llimpl=c_getresgid_llimpl, + export_name='ll_os.ll_os_getresgid') + @registering_str_unicode(os.open) def register_os_open(self, traits): os_open = self.llexternal(traits.posix_function_name('open'), diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -246,3 +246,21 @@ return 0 res = self.interpret(f, [9999, 1]) assert res == 1 + + if hasattr(os, 'getresuid'): + def test_getresuid(self): + def f(): + a, b, c = os.getresuid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresuid() + assert res == a + b * 37 + c * 1291 + + if hasattr(os, 'getresgid'): + def test_getresgid(self): + def f(): + a, b, c = os.getresgid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresgid() + assert res == a + b * 
37 + c * 1291 From noreply at buildbot.pypy.org Sat Nov 9 17:24:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:37 +0100 (CET) Subject: [pypy-commit] pypy default: os.setresuid(), os.setresgid() Message-ID: <20131109162437.813661C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67897:b4036daf7365 Date: 2013-11-09 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/b4036daf7365/ Log: os.setresuid(), os.setresgid() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -157,7 +157,7 @@ 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp', - 'getresuid', 'getresgid']: + 'getresuid', 'getresgid', 'setresuid', 'setresgid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1170,6 +1170,28 @@ space.wrap(egid), space.wrap(sgid)]) + at unwrap_spec(ruid=c_uid_t, euid=c_uid_t, suid=c_uid_t) +def setresuid(space, ruid, euid, suid): + """ setresuid(ruid, euid, suid) + + Set the current process's real, effective, and saved user ids. + """ + try: + os.setresuid(ruid, euid, suid) + except OSError, e: + raise wrap_oserror(space, e) + + at unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) +def setresgid(space, rgid, egid, sgid): + """ setresgid(rgid, egid, sgid) + + Set the current process's real, effective, and saved group ids. 
+ """ + try: + os.setresgid(rgid, egid, sgid) + except OSError, e: + raise wrap_oserror(space, e) + def declare_new_w_star(name): if name in RegisterOs.w_star_returning_int: @unwrap_spec(status=c_int) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -670,6 +670,18 @@ res = os.getresgid() assert len(res) == 3 + if hasattr(os, 'setresuid'): + def test_os_setresuid(self): + os = self.posix + a, b, c = os.getresuid() + os.setresuid(a, b, c) + + if hasattr(os, 'setresgid'): + def test_os_setresgid(self): + os = self.posix + a, b, c = os.getresgid() + os.setresgid(a, b, c) + if hasattr(os, 'sysconf'): def test_os_sysconf(self): os = self.posix diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -878,6 +878,32 @@ return extdef([], (int, int, int), llimpl=c_getresgid_llimpl, export_name='ll_os.ll_os_getresgid') + @registering_if(os, 'setresuid') + def register_os_setresuid(self): + c_setresuid = self.llexternal('setresuid', [rffi.INT] * 3, rffi.INT) + + def c_setresuid_llimpl(ruid, euid, suid): + res = c_setresuid(ruid, euid, suid) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "setresuid failed") + + return extdef([int, int, int], None, llimpl=c_setresuid_llimpl, + export_name='ll_os.ll_os_setresuid') + + @registering_if(os, 'setresgid') + def register_os_setresgid(self): + c_setresgid = self.llexternal('setresgid', [rffi.INT] * 3, rffi.INT) + + def c_setresgid_llimpl(rgid, egid, sgid): + res = c_setresgid(rgid, egid, sgid) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "setresgid failed") + + return extdef([int, int, int], None, llimpl=c_setresgid_llimpl, + export_name='ll_os.ll_os_setresgid') + @registering_str_unicode(os.open) def register_os_open(self, 
traits): os_open = self.llexternal(traits.posix_function_name('open'), diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -264,3 +264,19 @@ res = self.interpret(f, []) a, b, c = os.getresgid() assert res == a + b * 37 + c * 1291 + + if hasattr(os, 'setresuid'): + def test_setresuid(self): + def f(): + a, b, c = os.getresuid() + a = (a + 1) - 1 + os.setresuid(a, b, c) + self.interpret(f, []) + + if hasattr(os, 'setresgid'): + def test_setresgid(self): + def f(): + a, b, c = os.getresgid() + a = (a + 1) - 1 + os.setresgid(a, b, c) + self.interpret(f, []) From noreply at buildbot.pypy.org Sat Nov 9 17:24:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:38 +0100 (CET) Subject: [pypy-commit] pypy default: os.confstr() Message-ID: <20131109162438.9F93B1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67898:d3fc81a3142a Date: 2013-11-09 16:26 +0100 http://bitbucket.org/pypy/pypy/changeset/d3fc81a3142a/ Log: os.confstr() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -131,6 +131,9 @@ if hasattr(os, 'fpathconf'): interpleveldefs['fpathconf'] = 'interp_posix.fpathconf' interpleveldefs['pathconf_names'] = 'space.wrap(os.pathconf_names)' + if hasattr(os, 'confstr'): + interpleveldefs['confstr'] = 'interp_posix.confstr' + interpleveldefs['confstr_names'] = 'space.wrap(os.confstr_names)' if hasattr(os, 'ttyname'): interpleveldefs['ttyname'] = 'interp_posix.ttyname' if hasattr(os, 'getloadavg'): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1233,15 +1233,28 @@ def sysconf(space, w_name): num = confname_w(space, w_name, os.sysconf_names) - return 
space.wrap(os.sysconf(num)) + try: + res = os.sysconf(num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) @unwrap_spec(fd=c_int) def fpathconf(space, fd, w_name): num = confname_w(space, w_name, os.pathconf_names) try: - return space.wrap(os.fpathconf(fd, num)) + res = os.fpathconf(fd, num) except OSError, e: raise wrap_oserror(space, e) + return space.wrap(res) + +def confstr(space, w_name): + num = confname_w(space, w_name, os.confstr_names) + try: + res = os.confstr(num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) @unwrap_spec(path='str0', uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -78,6 +78,11 @@ cls.w_sysconf_name = space.wrap(sysconf_name) cls.w_sysconf_value = space.wrap(os.sysconf_names[sysconf_name]) cls.w_sysconf_result = space.wrap(os.sysconf(sysconf_name)) + if hasattr(os, 'confstr'): + confstr_name = os.confstr_names.keys()[0] + cls.w_confstr_name = space.wrap(confstr_name) + cls.w_confstr_value = space.wrap(os.confstr_names[confstr_name]) + cls.w_confstr_result = space.wrap(os.confstr(confstr_name)) cls.w_SIGABRT = space.wrap(signal.SIGABRT) cls.w_python = space.wrap(sys.executable) if hasattr(os, 'major'): @@ -700,6 +705,17 @@ raises(OSError, os.fpathconf, -1, "PC_PIPE_BUF") raises(ValueError, os.fpathconf, 1, "##") + if hasattr(os, 'confstr'): + def test_os_confstr(self): + os = self.posix + assert os.confstr(self.confstr_value) == self.confstr_result + assert os.confstr(self.confstr_name) == self.confstr_result + assert os.confstr_names[self.confstr_name] == self.confstr_value + + def test_os_confstr_error(self): + os = self.posix + raises(ValueError, os.confstr, "!@#$%!#$!@#") + if hasattr(os, 'wait'): def test_os_wait(self): os = self.posix diff --git a/rpython/rtyper/module/ll_os.py 
b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -651,6 +651,30 @@ return extdef([int, int], int, "ll_os.ll_fpathconf", llimpl=fpathconf_llimpl) + @registering_if(os, 'confstr') + def register_os_confstr(self): + c_confstr = self.llexternal('confstr', [rffi.INT, rffi.CCHARP, + rffi.SIZE_T], rffi.SIZE_T) + + def confstr_llimpl(i): + rposix.set_errno(0) + n = c_confstr(i, lltype.nullptr(rffi.CCHARP.TO), 0) + n = rffi.cast(lltype.Signed, n) + if n > 0: + buf = lltype.malloc(rffi.CCHARP.TO, n, flavor='raw') + try: + c_confstr(i, buf, n) + return rffi.charp2strn(buf, n) + finally: + lltype.free(buf, flavor='raw') + else: + errno = rposix.get_errno() + if errno != 0: + raise OSError(errno, "confstr failed") + return None + return extdef([int], SomeString(can_be_None=True), + "ll_os.ll_confstr", llimpl=confstr_llimpl) + @registering_if(os, 'getuid') def register_os_getuid(self): return self.extdef_for_os_function_returning_int('getuid') diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import hlstr from rpython.tool.udir import udir from rpython.rlib.rarithmetic import is_valid_int @@ -176,6 +177,19 @@ return os.sysconf(i) assert self.interpret(f, [13]) == f(13) + if hasattr(os, 'confstr'): + def test_os_confstr(self): + def f(i): + try: + return os.confstr(i) + except OSError: + return "oooops!!" + some_value = os.confstr_names.values()[-1] + res = self.interpret(f, [some_value]) + assert hlstr(res) == f(some_value) + res = self.interpret(f, [94781413]) + assert hlstr(res) == "oooops!!" 
+ if hasattr(os, 'chroot'): def test_os_chroot(self): def f(): From noreply at buildbot.pypy.org Sat Nov 9 17:24:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:24:39 +0100 (CET) Subject: [pypy-commit] pypy default: os.pathconf() Message-ID: <20131109162439.B898C1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67899:166ca52b236d Date: 2013-11-09 16:37 +0100 http://bitbucket.org/pypy/pypy/changeset/166ca52b236d/ Log: os.pathconf() diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -131,6 +131,8 @@ if hasattr(os, 'fpathconf'): interpleveldefs['fpathconf'] = 'interp_posix.fpathconf' interpleveldefs['pathconf_names'] = 'space.wrap(os.pathconf_names)' + if hasattr(os, 'pathconf'): + interpleveldefs['pathconf'] = 'interp_posix.pathconf' if hasattr(os, 'confstr'): interpleveldefs['confstr'] = 'interp_posix.confstr' interpleveldefs['confstr_names'] = 'space.wrap(os.confstr_names)' diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1248,6 +1248,15 @@ raise wrap_oserror(space, e) return space.wrap(res) + at unwrap_spec(path='str0') +def pathconf(space, path, w_name): + num = confname_w(space, w_name, os.pathconf_names) + try: + res = os.pathconf(path, num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) + def confstr(space, w_name): num = confname_w(space, w_name, os.confstr_names) try: diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -705,6 +705,14 @@ raises(OSError, os.fpathconf, -1, "PC_PIPE_BUF") raises(ValueError, os.fpathconf, 1, "##") + if hasattr(os, 'pathconf'): + def test_os_pathconf(self): + os = self.posix + assert os.pathconf("/tmp", 
"PC_NAME_MAX") >= 31 + # Linux: the following gets 'No such file or directory' + raises(OSError, os.pathconf, "", "PC_PIPE_BUF") + raises(ValueError, os.pathconf, "/tmp", "##") + if hasattr(os, 'confstr'): def test_os_confstr(self): os = self.posix diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -651,6 +651,22 @@ return extdef([int, int], int, "ll_os.ll_fpathconf", llimpl=fpathconf_llimpl) + @registering_if(os, 'pathconf') + def register_os_pathconf(self): + c_pathconf = self.llexternal('pathconf', + [rffi.CCHARP, rffi.INT], rffi.LONG) + + def pathconf_llimpl(path, i): + rposix.set_errno(0) + res = c_pathconf(path, i) + if res == -1: + errno = rposix.get_errno() + if errno != 0: + raise OSError(errno, "pathconf failed") + return res + return extdef([str0, int], int, "ll_os.ll_pathconf", + llimpl=pathconf_llimpl) + @registering_if(os, 'confstr') def register_os_confstr(self): c_confstr = self.llexternal('confstr', [rffi.INT, rffi.CCHARP, diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -190,6 +190,14 @@ res = self.interpret(f, [94781413]) assert hlstr(res) == "oooops!!" + if hasattr(os, 'pathconf'): + def test_os_pathconf(self): + def f(i): + return os.pathconf("/tmp", i) + i = os.pathconf_names["PC_NAME_MAX"] + some_value = self.interpret(f, [i]) + assert some_value >= 31 + if hasattr(os, 'chroot'): def test_os_chroot(self): def f(): From noreply at buildbot.pypy.org Sat Nov 9 17:34:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 17:34:48 +0100 (CET) Subject: [pypy-commit] pypy default: Support ptradd() with types different than CCHARP. 
Message-ID: <20131109163448.DFC281C030D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67900:e12176e1a61c Date: 2013-11-09 17:33 +0100 http://bitbucket.org/pypy/pypy/changeset/e12176e1a61c/ Log: Support ptradd() with types different than CCHARP. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1162,10 +1162,19 @@ v_result) def rewrite_op_direct_ptradd(self, op): - # xxx otherwise, not implemented: - assert op.args[0].concretetype == rffi.CCHARP + v_shift = op.args[1] + assert v_shift.concretetype == lltype.Signed + ops = [] # - return SpaceOperation('int_add', [op.args[0], op.args[1]], op.result) + if op.args[0].concretetype != rffi.CCHARP: + v_prod = varoftype(lltype.Signed) + by = llmemory.sizeof(op.args[0].concretetype.TO.OF) + c_by = Constant(by, lltype.Signed) + ops.append(SpaceOperation('int_mul', [v_shift, c_by], v_prod)) + v_shift = v_prod + # + ops.append(SpaceOperation('int_add', [op.args[0], v_shift], op.result)) + return ops # ---------- # Long longs, for 32-bit only. 
Supported operations are left unmodified, diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -993,6 +993,16 @@ int_return %i2 """, transform=True) + def test_direct_ptradd_2(self): + def f(p, n): + return lltype.direct_ptradd(p, n + 2) + self.encoding_test(f, [lltype.nullptr(rffi.SHORTP.TO), 123], """ + int_add %i1, $2 -> %i2 + int_mul %i2, $ 1> -> %i3 + int_add %i0, %i3 -> %i4 + int_return %i4 + """, transform=True) + def test_convert_float_bytes(self): from rpython.rlib.longlong2float import float2longlong, longlong2float def f(x): From noreply at buildbot.pypy.org Sat Nov 9 17:38:14 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:38:14 +0100 (CET) Subject: [pypy-commit] pypy default: Unroll ffi.new("struct s *", [a, list]) or ffi.new("struct s *", {a: dict}) Message-ID: <20131109163814.DF9DE1C030D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67901:988070ca4092 Date: 2013-11-09 08:36 -0800 http://bitbucket.org/pypy/pypy/changeset/988070ca4092/ Log: Unroll ffi.new("struct s *", [a, list]) or ffi.new("struct s *", {a: dict}) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -95,6 +95,9 @@ if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob) + @jit.look_inside_iff( + lambda self, cdata, w_ob, optvarsize=-1: jit.isvirtual(w_ob) + ) def convert_struct_from_object(self, cdata, w_ob, optvarsize=-1): self._check_only_one_argument_for_union(w_ob) diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -228,7 +228,7 @@ except ImportError: 
sys.stderr.write('SKIP: cannot import cffi\n') return 0 - + ffi = cffi.FFI() ffi.cdef(""" @@ -301,5 +301,30 @@ f(1) # libm_name = get_libm_name(sys.platform) - log = self.run(main, [libm_name]) + self.run(main, [libm_name]) # assert did not crash + + def test_cffi_init_struct_with_list(self): + def main(n): + import sys + try: + import cffi + except ImportError: + sys.stderr.write('SKIP: cannot import cffi\n') + return 0 + + ffi = cffi.FFI() + ffi.cdef(""" + struct s { + int x; + int y; + int z; + }; + """) + + for i in xrange(n): + ffi.new("struct s *", [i, i, i]) + + log = self.run(main, [300]) + loop, = log.loops_by_filename(self.filepath) + assert False, "XXX: fill this in" From noreply at buildbot.pypy.org Sat Nov 9 17:38:16 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:38:16 +0100 (CET) Subject: [pypy-commit] pypy default: No longer needed Message-ID: <20131109163816.201851C030D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67902:e6348d3ebc95 Date: 2013-11-09 08:37 -0800 http://bitbucket.org/pypy/pypy/changeset/e6348d3ebc95/ Log: No longer needed diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -868,9 +868,6 @@ self.buffer[i] = w_item.buffer[j] j += 1 - # We can't look into this function until ptradd works with things (in the - # JIT) other than rffi.CCHARP - @jit.dont_look_inside def delitem(self, space, i, j): if i < 0: i += self.len @@ -906,7 +903,6 @@ lltype.free(oldbuffer, flavor='raw') # Add and mul methods - def descr_add(self, space, w_other): if not isinstance(w_other, W_Array): return space.w_NotImplemented From noreply at buildbot.pypy.org Sat Nov 9 17:38:17 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:38:17 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20131109163817.50E1A1C030D@cobra.cs.uni-duesseldorf.de> 
Author: Alex Gaynor Branch: Changeset: r67903:040be31ca85a Date: 2013-11-09 08:37 -0800 http://bitbucket.org/pypy/pypy/changeset/040be31ca85a/ Log: merged upstream diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -131,6 +131,11 @@ if hasattr(os, 'fpathconf'): interpleveldefs['fpathconf'] = 'interp_posix.fpathconf' interpleveldefs['pathconf_names'] = 'space.wrap(os.pathconf_names)' + if hasattr(os, 'pathconf'): + interpleveldefs['pathconf'] = 'interp_posix.pathconf' + if hasattr(os, 'confstr'): + interpleveldefs['confstr'] = 'interp_posix.confstr' + interpleveldefs['confstr_names'] = 'space.wrap(os.confstr_names)' if hasattr(os, 'ttyname'): interpleveldefs['ttyname'] = 'interp_posix.ttyname' if hasattr(os, 'getloadavg'): @@ -155,7 +160,9 @@ for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', + 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp', + 'getresuid', 'getresgid', 'setresuid', 'setresgid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -987,7 +987,39 @@ Return list of supplemental group IDs for the process. """ - return space.newlist([space.wrap(e) for e in os.getgroups()]) + try: + list = os.getgroups() + except OSError, e: + raise wrap_oserror(space, e) + return space.newlist([space.wrap(e) for e in list]) + +def setgroups(space, w_list): + """ setgroups(list) + + Set the groups of the current process to list. 
+ """ + list = [] + for w_gid in space.unpackiterable(w_list): + gid = space.int_w(w_gid) + check_uid_range(space, gid) + list.append(gid) + try: + os.setgroups(list[:]) + except OSError, e: + raise wrap_oserror(space, e) + + at unwrap_spec(username=str, gid=c_gid_t) +def initgroups(space, username, gid): + """ initgroups(username, gid) -> None + + Call the system initgroups() to initialize the group access list with all of + the groups of which the specified username is a member, plus the specified + group id. + """ + try: + os.initgroups(username, gid) + except OSError, e: + raise wrap_oserror(space, e) def getpgrp(space): """ getpgrp() -> pgrp @@ -1089,6 +1121,77 @@ raise wrap_oserror(space, e) return space.w_None + at unwrap_spec(fd=c_int) +def tcgetpgrp(space, fd): + """ tcgetpgrp(fd) -> pgid + + Return the process group associated with the terminal given by a fd. + """ + try: + pgid = os.tcgetpgrp(fd) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(pgid) + + at unwrap_spec(fd=c_int, pgid=c_gid_t) +def tcsetpgrp(space, fd, pgid): + """ tcsetpgrp(fd, pgid) + + Set the process group associated with the terminal given by a fd. + """ + try: + os.tcsetpgrp(fd, pgid) + except OSError, e: + raise wrap_oserror(space, e) + +def getresuid(space): + """ getresuid() -> (ruid, euid, suid) + + Get tuple of the current process's real, effective, and saved user ids. + """ + try: + (ruid, euid, suid) = os.getresuid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(ruid), + space.wrap(euid), + space.wrap(suid)]) + +def getresgid(space): + """ getresgid() -> (rgid, egid, sgid) + + Get tuple of the current process's real, effective, and saved group ids. 
+ """ + try: + (rgid, egid, sgid) = os.getresgid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(rgid), + space.wrap(egid), + space.wrap(sgid)]) + + at unwrap_spec(ruid=c_uid_t, euid=c_uid_t, suid=c_uid_t) +def setresuid(space, ruid, euid, suid): + """ setresuid(ruid, euid, suid) + + Set the current process's real, effective, and saved user ids. + """ + try: + os.setresuid(ruid, euid, suid) + except OSError, e: + raise wrap_oserror(space, e) + + at unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) +def setresgid(space, rgid, egid, sgid): + """ setresgid(rgid, egid, sgid) + + Set the current process's real, effective, and saved group ids. + """ + try: + os.setresgid(rgid, egid, sgid) + except OSError, e: + raise wrap_oserror(space, e) + def declare_new_w_star(name): if name in RegisterOs.w_star_returning_int: @unwrap_spec(status=c_int) @@ -1130,15 +1233,37 @@ def sysconf(space, w_name): num = confname_w(space, w_name, os.sysconf_names) - return space.wrap(os.sysconf(num)) + try: + res = os.sysconf(num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) @unwrap_spec(fd=c_int) def fpathconf(space, fd, w_name): num = confname_w(space, w_name, os.pathconf_names) try: - return space.wrap(os.fpathconf(fd, num)) + res = os.fpathconf(fd, num) except OSError, e: raise wrap_oserror(space, e) + return space.wrap(res) + + at unwrap_spec(path='str0') +def pathconf(space, path, w_name): + num = confname_w(space, w_name, os.pathconf_names) + try: + res = os.pathconf(path, num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) + +def confstr(space, w_name): + num = confname_w(space, w_name, os.confstr_names) + try: + res = os.confstr(num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) @unwrap_spec(path='str0', uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): diff --git a/pypy/module/posix/test/test_posix2.py 
b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -78,6 +78,11 @@ cls.w_sysconf_name = space.wrap(sysconf_name) cls.w_sysconf_value = space.wrap(os.sysconf_names[sysconf_name]) cls.w_sysconf_result = space.wrap(os.sysconf(sysconf_name)) + if hasattr(os, 'confstr'): + confstr_name = os.confstr_names.keys()[0] + cls.w_confstr_name = space.wrap(confstr_name) + cls.w_confstr_value = space.wrap(os.confstr_names[confstr_name]) + cls.w_confstr_result = space.wrap(os.confstr(confstr_name)) cls.w_SIGABRT = space.wrap(signal.SIGABRT) cls.w_python = space.wrap(sys.executable) if hasattr(os, 'major'): @@ -616,6 +621,30 @@ os = self.posix assert os.getgroups() == self.getgroups + if hasattr(os, 'setgroups'): + def test_os_setgroups(self): + os = self.posix + raises(TypeError, os.setgroups, [2, 5, "hello"]) + try: + os.setgroups(os.getgroups()) + except OSError: + pass + + if hasattr(os, 'initgroups'): + def test_os_initgroups(self): + os = self.posix + raises(OSError, os.initgroups, "crW2hTQC", 100) + + if hasattr(os, 'tcgetpgrp'): + def test_os_tcgetpgrp(self): + os = self.posix + raises(OSError, os.tcgetpgrp, 9999) + + if hasattr(os, 'tcsetpgrp'): + def test_os_tcsetpgrp(self): + os = self.posix + raises(OSError, os.tcsetpgrp, 9999, 1) + if hasattr(os, 'getpgid'): def test_os_getpgid(self): os = self.posix @@ -634,6 +663,30 @@ assert os.getsid(0) == self.getsid0 raises(OSError, os.getsid, -100000) + if hasattr(os, 'getresuid'): + def test_os_getresuid(self): + os = self.posix + res = os.getresuid() + assert len(res) == 3 + + if hasattr(os, 'getresgid'): + def test_os_getresgid(self): + os = self.posix + res = os.getresgid() + assert len(res) == 3 + + if hasattr(os, 'setresuid'): + def test_os_setresuid(self): + os = self.posix + a, b, c = os.getresuid() + os.setresuid(a, b, c) + + if hasattr(os, 'setresgid'): + def test_os_setresgid(self): + os = self.posix + a, b, c = os.getresgid() + 
os.setresgid(a, b, c) + if hasattr(os, 'sysconf'): def test_os_sysconf(self): os = self.posix @@ -652,6 +705,25 @@ raises(OSError, os.fpathconf, -1, "PC_PIPE_BUF") raises(ValueError, os.fpathconf, 1, "##") + if hasattr(os, 'pathconf'): + def test_os_pathconf(self): + os = self.posix + assert os.pathconf("/tmp", "PC_NAME_MAX") >= 31 + # Linux: the following gets 'No such file or directory' + raises(OSError, os.pathconf, "", "PC_PIPE_BUF") + raises(ValueError, os.pathconf, "/tmp", "##") + + if hasattr(os, 'confstr'): + def test_os_confstr(self): + os = self.posix + assert os.confstr(self.confstr_value) == self.confstr_result + assert os.confstr(self.confstr_name) == self.confstr_result + assert os.confstr_names[self.confstr_name] == self.confstr_value + + def test_os_confstr_error(self): + os = self.posix + raises(ValueError, os.confstr, "!@#$%!#$!@#") + if hasattr(os, 'wait'): def test_os_wait(self): os = self.posix diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1162,10 +1162,19 @@ v_result) def rewrite_op_direct_ptradd(self, op): - # xxx otherwise, not implemented: - assert op.args[0].concretetype == rffi.CCHARP + v_shift = op.args[1] + assert v_shift.concretetype == lltype.Signed + ops = [] # - return SpaceOperation('int_add', [op.args[0], op.args[1]], op.result) + if op.args[0].concretetype != rffi.CCHARP: + v_prod = varoftype(lltype.Signed) + by = llmemory.sizeof(op.args[0].concretetype.TO.OF) + c_by = Constant(by, lltype.Signed) + ops.append(SpaceOperation('int_mul', [v_shift, c_by], v_prod)) + v_shift = v_prod + # + ops.append(SpaceOperation('int_add', [op.args[0], v_shift], op.result)) + return ops # ---------- # Long longs, for 32-bit only. 
Supported operations are left unmodified, diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -993,6 +993,16 @@ int_return %i2 """, transform=True) + def test_direct_ptradd_2(self): + def f(p, n): + return lltype.direct_ptradd(p, n + 2) + self.encoding_test(f, [lltype.nullptr(rffi.SHORTP.TO), 123], """ + int_add %i1, $2 -> %i2 + int_mul %i2, $ 1> -> %i3 + int_add %i0, %i3 -> %i4 + int_return %i4 + """, transform=True) + def test_convert_float_bytes(self): from rpython.rlib.longlong2float import float2longlong, longlong2float def f(x): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -129,10 +129,6 @@ ('tms_cutime', rffi.INT), ('tms_cstime', rffi.INT)]) - GID_T = platform.SimpleType('gid_t', rffi.INT) - #TODO right now is used only in getgroups, may need to update other - #functions like setgid - # For now we require off_t to be the same size as LONGLONG, which is the # interface required by callers of functions that thake an argument of type # off_t @@ -655,6 +651,46 @@ return extdef([int, int], int, "ll_os.ll_fpathconf", llimpl=fpathconf_llimpl) + @registering_if(os, 'pathconf') + def register_os_pathconf(self): + c_pathconf = self.llexternal('pathconf', + [rffi.CCHARP, rffi.INT], rffi.LONG) + + def pathconf_llimpl(path, i): + rposix.set_errno(0) + res = c_pathconf(path, i) + if res == -1: + errno = rposix.get_errno() + if errno != 0: + raise OSError(errno, "pathconf failed") + return res + return extdef([str0, int], int, "ll_os.ll_pathconf", + llimpl=pathconf_llimpl) + + @registering_if(os, 'confstr') + def register_os_confstr(self): + c_confstr = self.llexternal('confstr', [rffi.INT, rffi.CCHARP, + rffi.SIZE_T], rffi.SIZE_T) + + def confstr_llimpl(i): + rposix.set_errno(0) + n = c_confstr(i, 
lltype.nullptr(rffi.CCHARP.TO), 0) + n = rffi.cast(lltype.Signed, n) + if n > 0: + buf = lltype.malloc(rffi.CCHARP.TO, n, flavor='raw') + try: + c_confstr(i, buf, n) + return rffi.charp2strn(buf, n) + finally: + lltype.free(buf, flavor='raw') + else: + errno = rposix.get_errno() + if errno != 0: + raise OSError(errno, "confstr failed") + return None + return extdef([int], SomeString(can_be_None=True), + "ll_os.ll_confstr", llimpl=confstr_llimpl) + @registering_if(os, 'getuid') def register_os_getuid(self): return self.extdef_for_os_function_returning_int('getuid') @@ -693,7 +729,7 @@ @registering_if(os, 'getgroups') def register_os_getgroups(self): - GP = rffi.CArrayPtr(self.GID_T) + GP = rffi.CArrayPtr(rffi.PID_T) c_getgroups = self.llexternal('getgroups', [rffi.INT, GP], rffi.INT) def getgroups_llimpl(): @@ -702,16 +738,50 @@ groups = lltype.malloc(GP.TO, n, flavor='raw') try: n = c_getgroups(n, groups) - result = [groups[i] for i in range(n)] + result = [rffi.cast(lltype.Signed, groups[i]) + for i in range(n)] finally: lltype.free(groups, flavor='raw') if n >= 0: return result raise OSError(rposix.get_errno(), "os_getgroups failed") - return extdef([], [self.GID_T], llimpl=getgroups_llimpl, + return extdef([], [int], llimpl=getgroups_llimpl, export_name="ll_os.ll_getgroups") + @registering_if(os, 'setgroups') + def register_os_setgroups(self): + GP = rffi.CArrayPtr(rffi.PID_T) + c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT) + + def setgroups_llimpl(list): + n = len(list) + groups = lltype.malloc(GP.TO, n, flavor='raw') + try: + for i in range(n): + groups[i] = rffi.cast(rffi.PID_T, list[i]) + n = c_setgroups(rffi.cast(rffi.SIZE_T, n), groups) + finally: + lltype.free(groups, flavor='raw') + if n != 0: + raise OSError(rposix.get_errno(), "os_setgroups failed") + + return extdef([[int]], None, llimpl=setgroups_llimpl, + export_name="ll_os.ll_setgroups") + + @registering_if(os, 'initgroups') + def register_os_initgroups(self): + 
c_initgroups = self.llexternal('initgroups', + [rffi.CCHARP, rffi.PID_T], rffi.INT) + + def initgroups_llimpl(user, group): + n = c_initgroups(user, rffi.cast(rffi.PID_T, group)) + if n != 0: + raise OSError(rposix.get_errno(), "os_initgroups failed") + + return extdef([str, int], None, llimpl=initgroups_llimpl, + export_name="ll_os.ll_initgroups") + @registering_if(os, 'getpgrp') def register_os_getpgrp(self): name = 'getpgrp' @@ -747,6 +817,35 @@ else: return self.extdef_for_os_function_accepting_0int(name) + @registering_if(os, 'tcgetpgrp') + def register_os_tcgetpgrp(self): + c_tcgetpgrp = self.llexternal('tcgetpgrp', [rffi.INT], rffi.PID_T) + + def c_tcgetpgrp_llimpl(fd): + res = c_tcgetpgrp(rffi.cast(rffi.INT, fd)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "tcgetpgrp failed") + return res + + return extdef([int], int, llimpl=c_tcgetpgrp_llimpl, + export_name='ll_os.ll_os_tcgetpgrp') + + @registering_if(os, 'tcsetpgrp') + def register_os_tcsetpgrp(self): + c_tcsetpgrp = self.llexternal('tcsetpgrp', [rffi.INT, rffi.PID_T], + rffi.INT) + + def c_tcsetpgrp_llimpl(fd, pgrp): + res = c_tcsetpgrp(rffi.cast(rffi.INT, fd), + rffi.cast(rffi.PID_T, pgrp)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "tcsetpgrp failed") + + return extdef([int, int], None, llimpl=c_tcsetpgrp_llimpl, + export_name='ll_os.ll_os_tcsetpgrp') + @registering_if(os, 'getppid') def register_os_getppid(self): return self.extdef_for_os_function_returning_int('getppid') @@ -775,6 +874,76 @@ def register_os_setsid(self): return self.extdef_for_os_function_returning_int('setsid') + @registering_if(os, 'getresuid') + def register_os_getresuid(self): + c_getresuid = self.llexternal('getresuid', [rffi.INTP] * 3, rffi.INT) + + def c_getresuid_llimpl(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + res = c_getresuid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2)) + res = 
rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "getresuid failed") + return (rffi.cast(lltype.Signed, out[0]), + rffi.cast(lltype.Signed, out[1]), + rffi.cast(lltype.Signed, out[2])) + finally: + lltype.free(out, flavor='raw') + + return extdef([], (int, int, int), llimpl=c_getresuid_llimpl, + export_name='ll_os.ll_os_getresuid') + + @registering_if(os, 'getresgid') + def register_os_getresgid(self): + c_getresgid = self.llexternal('getresgid', [rffi.INTP] * 3, rffi.INT) + + def c_getresgid_llimpl(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + res = c_getresgid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "getresgid failed") + return (rffi.cast(lltype.Signed, out[0]), + rffi.cast(lltype.Signed, out[1]), + rffi.cast(lltype.Signed, out[2])) + finally: + lltype.free(out, flavor='raw') + + return extdef([], (int, int, int), llimpl=c_getresgid_llimpl, + export_name='ll_os.ll_os_getresgid') + + @registering_if(os, 'setresuid') + def register_os_setresuid(self): + c_setresuid = self.llexternal('setresuid', [rffi.INT] * 3, rffi.INT) + + def c_setresuid_llimpl(ruid, euid, suid): + res = c_setresuid(ruid, euid, suid) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "setresuid failed") + + return extdef([int, int, int], None, llimpl=c_setresuid_llimpl, + export_name='ll_os.ll_os_setresuid') + + @registering_if(os, 'setresgid') + def register_os_setresgid(self): + c_setresgid = self.llexternal('setresgid', [rffi.INT] * 3, rffi.INT) + + def c_setresgid_llimpl(rgid, egid, sgid): + res = c_setresgid(rgid, egid, sgid) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "setresgid failed") + + return extdef([int, int, int], None, llimpl=c_setresgid_llimpl, + export_name='ll_os.ll_os_setresgid') + @registering_str_unicode(os.open) def 
register_os_open(self, traits): os_open = self.llexternal(traits.posix_function_name('open'), diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import hlstr from rpython.tool.udir import udir from rpython.rlib.rarithmetic import is_valid_int @@ -176,6 +177,27 @@ return os.sysconf(i) assert self.interpret(f, [13]) == f(13) + if hasattr(os, 'confstr'): + def test_os_confstr(self): + def f(i): + try: + return os.confstr(i) + except OSError: + return "oooops!!" + some_value = os.confstr_names.values()[-1] + res = self.interpret(f, [some_value]) + assert hlstr(res) == f(some_value) + res = self.interpret(f, [94781413]) + assert hlstr(res) == "oooops!!" + + if hasattr(os, 'pathconf'): + def test_os_pathconf(self): + def f(i): + return os.pathconf("/tmp", i) + i = os.pathconf_names["PC_NAME_MAX"] + some_value = self.interpret(f, [i]) + assert some_value >= 31 + if hasattr(os, 'chroot'): def test_os_chroot(self): def f(): @@ -205,3 +227,78 @@ return os.getgroups() ll_a = self.interpret(f, []) assert self.ll_to_list(ll_a) == f() + + if hasattr(os, 'setgroups'): + def test_setgroups(self): + def f(): + try: + os.setgroups(os.getgroups()) + except OSError: + pass + self.interpret(f, []) + + if hasattr(os, 'initgroups'): + def test_initgroups(self): + def f(): + try: + os.initgroups('sUJJeumz', 4321) + except OSError: + return 1 + return 0 + res = self.interpret(f, []) + assert res == 1 + + if hasattr(os, 'tcgetpgrp'): + def test_tcgetpgrp(self): + def f(fd): + try: + return os.tcgetpgrp(fd) + except OSError: + return 42 + res = self.interpret(f, [9999]) + assert res == 42 + + if hasattr(os, 'tcsetpgrp'): + def test_tcsetpgrp(self): + def f(fd, pgrp): + try: + os.tcsetpgrp(fd, pgrp) + except OSError: + return 1 + return 0 + res 
= self.interpret(f, [9999, 1]) + assert res == 1 + + if hasattr(os, 'getresuid'): + def test_getresuid(self): + def f(): + a, b, c = os.getresuid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresuid() + assert res == a + b * 37 + c * 1291 + + if hasattr(os, 'getresgid'): + def test_getresgid(self): + def f(): + a, b, c = os.getresgid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresgid() + assert res == a + b * 37 + c * 1291 + + if hasattr(os, 'setresuid'): + def test_setresuid(self): + def f(): + a, b, c = os.getresuid() + a = (a + 1) - 1 + os.setresuid(a, b, c) + self.interpret(f, []) + + if hasattr(os, 'setresgid'): + def test_setresgid(self): + def f(): + a, b, c = os.getresgid() + a = (a + 1) - 1 + os.setresgid(a, b, c) + self.interpret(f, []) From noreply at buildbot.pypy.org Sat Nov 9 17:41:32 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:41:32 +0100 (CET) Subject: [pypy-commit] pypy default: translation fix Message-ID: <20131109164132.259B51C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67904:43529df9afc0 Date: 2013-11-09 08:41 -0800 http://bitbucket.org/pypy/pypy/changeset/43529df9afc0/ Log: translation fix diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -95,12 +95,15 @@ if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob) - @jit.look_inside_iff( - lambda self, cdata, w_ob, optvarsize=-1: jit.isvirtual(w_ob) - ) def convert_struct_from_object(self, cdata, w_ob, optvarsize=-1): self._check_only_one_argument_for_union(w_ob) + self._convert_struct_from_object(cdata, w_ob, optvarsize) + # XXX: needed because look_inside_iff doesn't like default args + @jit.look_inside_iff( + lambda self, cdata, w_ob, optvarsize: jit.isvirtual(w_ob) + ) + def 
_convert_struct_from_object(self, cdata, w_ob, optvarsize): space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): From noreply at buildbot.pypy.org Sat Nov 9 17:45:02 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 9 Nov 2013 17:45:02 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: merge default into branch Message-ID: <20131109164502.94B2E1C00F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67905:6a6f016d87dd Date: 2013-11-09 18:15 +0200 http://bitbucket.org/pypy/pypy/changeset/6a6f016d87dd/ Log: merge default into branch diff too long, truncating to 2000 out of 19494 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." 
+ @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,9 +26,11 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest to use virtualenv with the resulting pypy-c as the interpreter, you can diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. + buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from . 
import core -from .core import * -from . import lib -from .lib import * - -from __builtin__ import bool, int, long, float, complex, object, unicode, str - -from .core import round, abs, max, min - -__version__ = '1.7.0' - -__all__ = ['__version__'] -__all__ += core.__all__ -__all__ += lib.__all__ - -#import sys -#sys.modules.setdefault('numpy', sys.modules['numpypy']) diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from . import multiarray -from . import umath -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . import shape_base -from .shape_base import * - -from .fromnumeric import amax as max, amin as min, \ - round_ as round -from .numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -import multiarray as mu -import umath as um -from numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _any(a, axis=None, 
dtype=None, out=None, keepdims=False): - return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(xrange(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: - ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - rcount = _count_reduce_items(arr, axis) - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - arr = asanyarray(a) - - # First compute the mean, saving 'rcount' for reuse later - if dtype is None and arr.dtype.kind in ['b','u','i']: - arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True) - else: - arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True) - rcount = _count_reduce_items(arr, axis) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide(arrmean, rcount, - out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean / float(rcount) - - # arr - arrmean - x = arr - arrmean - - # (arr - arrmean) ** 2 - if arr.dtype.kind == 'c': - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - - # add.reduce((arr - arrmean) ** 2, axis) - ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof) 
- if not keepdims and isinstance(rcount, mu.ndarray): - rcount = rcount.squeeze(axis=axis) - rcount -= ddof - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - else: - ret = um.sqrt(ret) - - return ret diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/arrayprint.py +++ /dev/null @@ -1,751 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", "get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -import numerictypes as _nt -from umath import maximum, minimum, absolute, not_equal, isnan, isinf -#from multiarray import format_longfloat, datetime_as_string, datetime_data -from fromnumeric import ravel - - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements - -if sys.version_info[0] >= 3: - from functools import reduce - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): - """ - Set printing options. 
- - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - See Also - -------- - get_printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) - >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... 
suppress=False, threshold=1000, formatter=None) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ - _formatter - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter - -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - For a full description of these options, see `set_printoptions`. 
- - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' - else: - return 'False' - - -def repr_format(x): - return repr(x) - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - formatdict = {'bool' : _boolFormatter, - 'int' : IntegerFormat(data), - 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : FloatFormat(data, precision, suppress_small), - 'complexfloat' : ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat' : ComplexFormat(data, precision, - suppress_small), - 'datetime' : DatetimeFormat(data), - 'timedelta' : TimedeltaFormat(data), - 'numpystr' : repr_format, - 'str' : str} - - if formatter is not None: - fkeys = [k for k in formatter.keys() if 
formatter[k] is not None] - if 'all' in fkeys: - for key in formatdict.keys(): - formatdict[key] = formatter['all'] - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = formatter['int_kind'] - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = formatter['float_kind'] - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = formatter['complex_kind'] - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = formatter['str_kind'] - for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = formatter[key] - - try: - format_function = a._format - msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \ - "will be removed in 2.1. Use the `formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - # find the right formatting function for the array - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - format_function = formatdict['bool'] - elif issubclass(dtypeobj, _nt.integer): - #if issubclass(dtypeobj, _nt.timedelta64): - # format_function = formatdict['timedelta'] - #else: - format_function = formatdict['int'] - elif issubclass(dtypeobj, _nt.floating): - if hasattr(_nt, 'longfloat') and issubclass(dtypeobj, _nt.longfloat): - format_function = formatdict['longfloat'] - else: - format_function = formatdict['float'] - elif issubclass(dtypeobj, _nt.complexfloating): - if hasattr(_nt, 'clongfloat') and issubclass(dtypeobj, _nt.clongfloat): - format_function = formatdict['longcomplexfloat'] - else: - format_function = formatdict['complexfloat'] - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - format_function = formatdict['numpystr'] - #elif issubclass(dtypeobj, _nt.datetime64): - # format_function = formatdict['datetime'] - else: - format_function = formatdict['str'] - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - 
lst = _formatArray(a, format_function, len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - return lst - -def _convert_arrays(obj): - import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError : if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - msg = "The `_format` attribute is deprecated in Numpy " \ - "2.0 and will be removed in 2.1. Use the " \ - "`formatter` kw instead." 
- import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix, formatter=formatter) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in xrange(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in xrange(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in xrange(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, 
separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in xrange(trailing_items, 1, -1): - if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = precision - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - self.max_str_len = 0 - try: - self.fillFormat(data) - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - - def fillFormat(self, data): - import numeric as _nc - errstate = _nc.seterr(all='ignore') - try: - special = isnan(data) | isinf(data) - valid = not_equal(data, 0) & ~special - non_zero = absolute(data.compress(valid)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. 
- else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): - self.exp_format = True - finally: - _nc.seterr(**errstate) - - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' - else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) - else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) - else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' - else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) - - self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format - - def __call__(self, x, strip_zeros=True): - import numeric as _nc - err = _nc.seterr(invalid='ignore') - try: - if isnan(x): - if self.sign: - return self.special_fmt % ('+' + _nan_str,) - else: - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - if self.sign: - return self.special_fmt % ('+' + _inf_str,) - else: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + _inf_str,) - finally: - _nc.seterr(**err) - - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s - - -def _digits(x, precision, format): - s = format % x - z = 
s.rstrip('0') - return precision - len(s) + len(z) - - -_MAXINT = sys.maxint -_MININT = -sys.maxint-1 -class IntegerFormat(object): - def __init__(self, data): - try: - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - self.format = '%' + str(max_str_len) + 'd' - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - except ValueError: - # this occurs when everything is NA - pass - - def __call__(self, x): - if _MININT < x < _MAXINT: - return self.format % x - else: - return "%s" % x - -class LongFloatFormat(object): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def __init__(self, precision, sign=False): - self.precision = precision - self.sign = sign - - def __call__(self, x): - if isnan(x): - if self.sign: - return '+' + _nan_str - else: - return ' ' + _nan_str - elif isinf(x): - if x > 0: - if self.sign: - return '+' + _inf_str - else: - return ' ' + _inf_str - else: - return '-' + _inf_str - elif x >= 0: - if self.sign: - return '+' + format_longfloat(x, self.precision) - else: - return ' ' + format_longfloat(x, self.precision) - else: - return format_longfloat(x, self.precision) - - -class LongComplexFormat(object): - def __init__(self, precision): - self.real_format = LongFloatFormat(precision) - self.imag_format = LongFloatFormat(precision, sign=True) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - return r + i + 'j' - - -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) - - def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not self.imag_format.exp_format: - z 
= i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) - else: - i = i + 'j' - return r + i - -class DatetimeFormat(object): - def __init__(self, x, unit=None, - timezone=None, casting='same_kind'): - # Get the unit from the dtype - if unit is None: - if x.dtype.kind == 'M': - unit = datetime_data(x.dtype)[0] - else: - unit = 's' - - # If timezone is default, make it 'local' or 'UTC' based on the unit - if timezone is None: - # Date units -> UTC, time units -> local - if unit in ('Y', 'M', 'W', 'D'): - self.timezone = 'UTC' - else: - self.timezone = 'local' - else: - self.timezone = timezone - self.unit = unit - self.casting = casting - - def __call__(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) - -class TimedeltaFormat(object): - def __init__(self, data): - if data.dtype.kind == 'm': - v = data.view('i8') - max_str_len = max(len(str(maximum.reduce(v))), - len(str(minimum.reduce(v)))) - self.format = '%' + str(max_str_len) + 'd' - - def __call__(self, x): - return self.format % x.astype('i8') - diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/fromnumeric.py +++ /dev/null @@ -1,2924 +0,0 @@ -###################################################################### -# This is a copy of numpy/core/fromnumeric.py modified for numpypy -###################################################################### -"""Module containing non-deprecated functions borrowed from Numeric. - -""" -from __future__ import division, absolute_import, print_function - -import types - -from . import multiarray as mu -from . import umath as um -from . import numerictypes as nt -from .numeric import asarray, array, asanyarray, concatenate -from . 
import _methods - - -# functions that are methods -__all__ = [ - 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', - 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', - 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', - 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', - 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', - 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', - ] - - -try: - _gentype = types.GeneratorType -except AttributeError: - _gentype = type(None) - -# save away Python sum -_sum_ = sum - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj), method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. - - Parameters - ---------- - a : array_like - The source array. - indices : array_like - The indices of the values to extract. - - .. versionadded:: 1.8.0 - - Also allow scalars for indices. - axis : int, optional - The axis over which to select values. By default, the flattened - input array is used. - out : ndarray, optional - If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. 
- - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - Returns - ------- - subarray : ndarray - The returned array has the same type as `a`. - - See Also - -------- - ndarray.take : equivalent method - - Examples - -------- - >>> a = [4, 3, 5, 7, 6, 8] - >>> indices = [0, 1, 4] - >>> np.take(a, indices) - array([4, 3, 6]) - - In this example if `a` is an ndarray, "fancy" indexing can be used. - - >>> a = np.array(a) - >>> a[indices] - array([4, 3, 6]) - - If `indices` is not one dimensional, the output also has these dimensions. - - >>> np.take(a, [[0, 1], [2, 3]]) - array([[4, 3], - [5, 7]]) - """ - try: - take = a.take - except AttributeError: - return _wrapit(a, 'take', indices, axis, out, mode) - return take(indices, axis, out, mode) - - -# not deprecated --- copy if necessary, view otherwise -def reshape(a, newshape, order='C'): - """ - Gives a new shape to an array without changing its data. - - Parameters - ---------- - a : array_like - Array to be reshaped. - newshape : int or tuple of ints - The new shape should be compatible with the original shape. If - an integer, then the result will be a 1-D array of that length. - One shape dimension can be -1. In this case, the value is inferred - from the length of the array and remaining dimensions. - order : {'C', 'F', 'A'}, optional - Read the elements of `a` using this index order, and place the elements - into the reshaped array using this index order. 'C' means to - read / write the elements using C-like index order, with the last axis index - changing fastest, back to the first axis index changing slowest. 'F' - means to read / write the elements using Fortran-like index order, with - the first index changing fastest, and the last index changing slowest. 
- Note that the 'C' and 'F' options take no account of the memory layout - of the underlying array, and only refer to the order of indexing. 'A' - means to read / write the elements in Fortran-like index order if `a` is - Fortran *contiguous* in memory, C-like order otherwise. - - Returns - ------- - reshaped_array : ndarray - This will be a new view object if possible; otherwise, it will - be a copy. Note there is no guarantee of the *memory layout* (C- or - Fortran- contiguous) of the returned array. - - See Also - -------- - ndarray.reshape : Equivalent method. - - Notes - ----- - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raise if the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - # A transpose make the array non-contiguous - >>> b = a.T - # Taking a view makes it possible to modify the shape without modifying the - # initial object. - >>> c = b.view() - >>> c.shape = (20) - AttributeError: incompatible shape for a non-contiguous array - - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. For example, - let's say you have an array: - - >>> a = np.arange(6).reshape((3, 2)) - >>> a - array([[0, 1], - [2, 3], - [4, 5]]) - - You can think of reshaping as first raveling the array (using the given - index order), then inserting the elements from the raveled array into the - new array using the same kind of index ordering as was used for the - raveling. 
- - >>> np.reshape(a, (2, 3)) # C-like index ordering - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering - array([[0, 4, 3], - [2, 1, 5]]) - >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') - array([[0, 4, 3], - [2, 1, 5]]) - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - """ - assert order == 'C' - try: - reshape = a.reshape - except AttributeError: - return _wrapit(a, 'reshape', newshape) - return reshape(newshape) - - -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties. Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. 
Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. 
- - See Also - -------- - ndarray.choose : equivalent method - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... ) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - try: - choose = a.choose - except AttributeError: - return _wrapit(a, 'choose', choices, out=out, mode=mode) - return choose(choices, out=out, mode=mode) - - -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. 
- repeats : {int, array of ints} - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. - axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. - - Examples - -------- - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - try: - repeat = a.repeat - except AttributeError: - return _wrapit(a, 'repeat', repeats, axis) - return repeat(repeats, axis) - - -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. 
- - See Also - -------- - putmask, place - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - return a.put(ind, v, mode) - - -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. - axis2 : int - Second axis. - - Returns - ------- - a_swapped : ndarray - If `a` is an ndarray, then a view of `a` is returned; otherwise - a new array is created. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) - - >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) - >>> x - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - - >>> np.swapaxes(x,0,2) - array([[[0, 4], - [2, 6]], - [[1, 5], - [3, 7]]]) - - """ - try: - swapaxes = a.swapaxes - except AttributeError: - return _wrapit(a, 'swapaxes', axis1, axis2) - return swapaxes(axis1, axis2) - - -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. - axes : list of ints, optional - By default, reverse the dimensions, otherwise permute the axes - according to the values given. - - Returns - ------- - p : ndarray - `a` with its axes permuted. A view is returned whenever - possible. 
- - See Also - -------- - rollaxis - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.transpose(x) - array([[0, 2], - [1, 3]]) - - >>> x = np.ones((1, 2, 3)) - >>> np.transpose(x, (1, 0, 2)).shape - (2, 1, 3) - - """ - if axes is not None: - raise NotImplementedError('No "axes" arg yet.') - try: - transpose = a.transpose - except AttributeError: - return _wrapit(a, 'transpose') - return transpose() - - -def partition(a, kth, axis=-1, kind='introselect', order=None): - """ - Return a partitioned copy of an array. - - Creates a copy of the array with its elements rearranged in such a way that - the value of the element in kth position is in the position it would be in - a sorted array. All elements smaller than the kth element are moved before - this element and all equal or greater are moved behind it. The ordering of - the elements in the two partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to be sorted. - kth : int or sequence of ints - Element index to partition by. The kth value of the element will be in - its final sorted position and all smaller elements will be moved before - it and all equal or greater elements behind it. - The order all elements in the partitions is undefined. - If provided with a sequence of kth it will partition all elements - indexed by kth of them into their sorted position at once. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. - - Returns - ------- - partitioned_array : ndarray - Array of the same type and shape as `a`. 
- - See Also - -------- - ndarray.partition : Method to sort an array in-place. - argpartition : Indirect partition. - sort : Full sorting - - Notes - ----- - The various selection algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative order. The - three available algorithms have the following properties: - - ================= ======= ============= ============ ======= - kind speed worst case work space stable - ================= ======= ============= ============ ======= - 'introselect' 1 O(n) 0 no - ================= ======= ============= ============ ======= - - All the partition algorithms make temporary copies of the data when - partitioning along any but the last axis. Consequently, partitioning - along the last axis is faster and uses less space than partitioning - along any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> np.partition(a, 3) - array([2, 1, 3, 4]) - - >>> np.partition(a, (1, 3)) - array([1, 2, 3, 4]) - - """ - if axis is None: - a = asanyarray(a).flatten() - axis = 0 - else: - a = asanyarray(a).copy() - a.partition(kth, axis=axis, kind=kind, order=order) - return a - - -def argpartition(a, kth, axis=-1, kind='introselect', order=None): - """ - Perform an indirect partition along the given axis using the algorithm - specified by the `kind` keyword. It returns an array of indices of the - same shape as `a` that index data along the given axis in partitioned - order. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to sort. - kth : int or sequence of ints - Element index to partition by. 
The kth element will be in its final - sorted position and all smaller elements will be moved before it and - all larger elements behind it. - The order all elements in the partitions is undefined. - If provided with a sequence of kth it will partition all of them into - their sorted position at once. - axis : int or None, optional - Axis along which to sort. The default is -1 (the last axis). If None, - the flattened array is used. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect' - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - Returns - ------- - index_array : ndarray, int - Array of indices that partition `a` along the specified axis. - In other words, ``a[index_array]`` yields a sorted `a`. - - See Also - -------- - partition : Describes partition algorithms used. - ndarray.partition : Inplace partition. - argsort : Full indirect sort - - Notes - ----- - See `partition` for notes on the different selection algorithms. - - Examples - -------- - One dimensional array: - - >>> x = np.array([3, 4, 2, 1]) - >>> x[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - >>> x[np.argpartition(x, (1, 3))] - array([1, 2, 3, 4]) - - """ - return a.argpartition(kth, axis, kind=kind, order=order) - - -def sort(a, axis=-1, kind='quicksort', order=None): - """ - Return a sorted copy of an array. - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. 
- - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - partition : Partial sort. - - Notes - ----- - The various sorting algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following - properties: - - =========== ======= ============= ============ ======= - kind speed worst case work space stable - =========== ======= ============= ============ ======= - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no - =========== ======= ============= ============ ======= - - All the sort algorithms make temporary copies of the data when - sorting along any but the last axis. Consequently, sorting along - the last axis is faster and uses less space than sorting along - any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Previous to numpy 1.4.0 sorting real and complex arrays containing nan - values led to undefined behaviour. In numpy versions >= 1.4.0 nan - values are sorted to the end. The extended sort order is: - - * Real: [R, nan] - * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] - - where R is a non-nan real value. Complex values with the same nan - placements are sorted according to the non-nan part if it exists. - Non-nan values are sorted as before. 
- - Examples - -------- - >>> a = np.array([[1,4],[3,1]]) - >>> np.sort(a) # sort along the last axis - array([[1, 4], - [1, 3]]) - >>> np.sort(a, axis=None) # sort the flattened array - array([1, 1, 3, 4]) - >>> np.sort(a, axis=0) # sort along the first axis - array([[1, 1], - [3, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] - >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), - ... ('Galahad', 1.7, 38)] - >>> a = np.array(values, dtype=dtype) # create a structured array - >>> np.sort(a, order='height') # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.8999999999999999, 38)], - dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), - ('Arthur', 1.8, 41)], - dtype=[('name', '|S10'), ('height', '>> x = np.array([3, 1, 2]) - >>> np.argsort(x) - array([1, 2, 0]) - - Two-dimensional array: - - >>> x = np.array([[0, 3], [2, 2]]) - >>> x - array([[0, 3], - [2, 2]]) - - >>> np.argsort(x, axis=0) - array([[0, 1], - [1, 0]]) - - >>> np.argsort(x, axis=1) - array([[0, 1], - [0, 1]]) - - Sorting with keys: - - >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> x - array([(1, 0), (0, 1)], - dtype=[('x', '>> np.argsort(x, order=('x','y')) - array([1, 0]) - - >>> np.argsort(x, order=('y','x')) - array([0, 1]) - - """ - try: - argsort = a.argsort - except AttributeError: - return _wrapit(a, 'argsort', axis, kind, order) - return argsort(axis, kind, order) - - -def argmax(a, axis=None): - """ - Indices of the maximum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. 
It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmax, argmin - amax : The maximum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - - Notes - ----- - In case of multiple occurrences of the maximum values, the indices - corresponding to the first occurrence are returned. - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argmax(a) - 5 - >>> np.argmax(a, axis=0) - array([1, 1, 1]) - >>> np.argmax(a, axis=1) - array([2, 2]) - - >>> b = np.arange(6) - >>> b[1] = 5 - >>> b - array([0, 5, 2, 3, 4, 5]) - >>> np.argmax(b) # Only the first occurrence is returned. - 1 - - """ - assert axis is None - try: - argmax = a.argmax - except AttributeError: - return _wrapit(a, 'argmax') - return argmax() - - -def argmin(a, axis=None): - """ - Return the indices of the minimum values along an axis. - - See Also - -------- - argmax : Similar function. Please refer to `numpy.argmax` for detailed - documentation. - - """ - assert axis is None - try: - argmin = a.argmin - except AttributeError: - return _wrapit(a, 'argmin') - return argmin() - - -def searchsorted(a, v, side='left', sorter=None): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `a` such that, if the - corresponding elements in `v` were inserted before the indices, the - order of `a` would be preserved. - - Parameters - ---------- - a : 1-D array_like - Input array. If `sorter` is None, then it must be sorted in - ascending order, otherwise `sorter` must be an array of indices - that sort it. - v : array_like - Values to insert into `a`. - side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. - If 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `a`). 
- sorter : 1-D array_like, optional - .. versionadded:: 1.7.0 - Optional array of integer indices that sort array a into ascending From noreply at buildbot.pypy.org Sat Nov 9 17:45:03 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 9 Nov 2013 17:45:03 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: move test Message-ID: <20131109164503.A6E851C00F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67906:dd36dcfccc8d Date: 2013-11-09 18:43 +0200 http://bitbucket.org/pypy/pypy/changeset/dd36dcfccc8d/ Log: move test diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -97,7 +97,7 @@ return (PyObject *)enumObj; """), - ], + ], prologue=""" typedef struct { @@ -166,3 +166,24 @@ assert isinstance(a, int) assert a == int(a) == 42 assert a.name == "ULTIMATE_ANSWER" + + def test_int_cast(self): + mod = self.import_extension('foo', [ + #prove it works for ints + ("test_int", "METH_NOARGS", + """ + PyObject * obj = PyInt_FromLong(42); + if (!PyInt_Check(obj)) { + Py_DECREF(obj); + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + PyObject * val = PyInt_FromLong(((PyIntObject *)obj)->ob_ival); + Py_DECREF(obj); + return val; + """ + ), + ], prologue='#include ') + i = mod.test_int() + assert isinstance(i, int) + assert i == 42 diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -1,5 +1,3 @@ -import py - from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype @@ -287,23 +285,4 @@ dt = mod.test_DescrFromType(11) assert dt.num == 11 - def test_int_cast(self): - mod = self.import_extension('foo', [ - 
#prove it works for ints - ("test_int", "METH_NOARGS", - """ - PyObject * obj = PyInt_FromLong(42); - if (!PyInt_Check(obj)) { - Py_DECREF(obj); - PyErr_SetNone(PyExc_ValueError); - return NULL; - } - PyObject * val = PyInt_FromLong(((PyIntObject *)obj)->ob_ival); - Py_DECREF(obj); - return val; - """ - ), - ], prologue='#include ') - i = mod.test_int() - assert isinstance(i, int) - assert i == 42 + From noreply at buildbot.pypy.org Sat Nov 9 17:46:29 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Nov 2013 17:46:29 +0100 (CET) Subject: [pypy-commit] pypy default: Simplify Message-ID: <20131109164629.F31791C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67907:3eaae5cfbc86 Date: 2013-11-09 08:45 -0800 http://bitbucket.org/pypy/pypy/changeset/3eaae5cfbc86/ Log: Simplify diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -93,17 +93,11 @@ def convert_from_object(self, cdata, w_ob): if not self._copy_from_same(cdata, w_ob): - self.convert_struct_from_object(cdata, w_ob) + self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) - def convert_struct_from_object(self, cdata, w_ob, optvarsize=-1): + def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) - self._convert_struct_from_object(cdata, w_ob, optvarsize) - # XXX: needed because look_inside_iff doesn't like default args - @jit.look_inside_iff( - lambda self, cdata, w_ob, optvarsize: jit.isvirtual(w_ob) - ) - def _convert_struct_from_object(self, cdata, w_ob, optvarsize): space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): From noreply at buildbot.pypy.org Sat Nov 9 22:11:44 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 9 Nov 2013 22:11:44 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: cleanup and 
document Message-ID: <20131109211144.5C2E81C0144@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67908:4e1dc333a6e5 Date: 2013-11-09 23:03 +0200 http://bitbucket.org/pypy/pypy/changeset/4e1dc333a6e5/ Log: cleanup and document diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -91,6 +91,7 @@ .. branch: safe-win-mmap .. branch: boolean-indexing-cleanup .. branch: cpyext-best_base +.. branch: cpyext-int .. branch: fileops2 .. branch: nobold-backtrace diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -183,7 +183,7 @@ return val; """ ), - ], prologue='#include ') + ]) i = mod.test_int() assert isinstance(i, int) assert i == 42 From noreply at buildbot.pypy.org Sat Nov 9 22:11:46 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 9 Nov 2013 22:11:46 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: merge default into branch Message-ID: <20131109211146.BE3A81C0162@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67909:2f1482525200 Date: 2013-11-09 23:06 +0200 http://bitbucket.org/pypy/pypy/changeset/2f1482525200/ Log: merge default into branch diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -47,3 +47,7 @@ * post announcement on morepypy.blogspot.com * send announcements to pypy-dev, python-list, python-announce, python-dev ... 
+ +* add a tag on jitviewer that corresponds to pypy release +* add a tag on codespeed that corresponds to pypy release + diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -7,7 +7,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.7")', + '__version__': 'space.wrap("0.8")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -19,9 +19,9 @@ _cdata = lltype.nullptr(rffi.CCHARP.TO) def __init__(self, space, cdata, ctype): - from pypy.module._cffi_backend import ctypeprim + from pypy.module._cffi_backend import ctypeobj assert lltype.typeOf(cdata) == rffi.CCHARP - assert isinstance(ctype, ctypeprim.W_CType) + assert isinstance(ctype, ctypeobj.W_CType) self.space = space self._cdata = cdata # don't forget keepalive_until_here! self.ctype = ctype @@ -211,7 +211,21 @@ keepalive_until_here(w_value) return # + # A fast path for [0:N] = "somestring". 
+ from pypy.module._cffi_backend import ctypeprim space = self.space + if (space.isinstance_w(w_value, space.w_str) and + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + value = space.str_w(w_value) + if len(value) != length: + raise operationerrfmt(space.w_ValueError, + "need a string of length %d, got %d", + length, len(value)) + copy_string_to_raw(llstr(value), cdata, 0, length) + return + # w_iter = space.iter(w_value) for i in range(length): try: @@ -245,19 +259,22 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray + from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr # if (ct is not self.ctype or not isinstance(ct, ctypeptr.W_CTypePointer) or - ct.ctitem.size <= 0): + (ct.ctitem.size <= 0 and not ct.is_void_ptr)): raise operationerrfmt(space.w_TypeError, "cannot subtract cdata '%s' and cdata '%s'", self.ctype.name, ct.name) # + itemsize = ct.ctitem.size + if itemsize <= 0: itemsize = 1 diff = (rffi.cast(lltype.Signed, self._cdata) - - rffi.cast(lltype.Signed, w_other._cdata)) // ct.ctitem.size + rffi.cast(lltype.Signed, w_other._cdata)) // itemsize return space.wrap(diff) # return self._add_or_sub(w_other, -1) @@ -441,6 +458,7 @@ __getitem__ = interp2app(W_CData.getitem), __setitem__ = interp2app(W_CData.setitem), __add__ = interp2app(W_CData.add), + __radd__ = interp2app(W_CData.add), __sub__ = interp2app(W_CData.sub), __getattr__ = interp2app(W_CData.getattr), __setattr__ = interp2app(W_CData.setattr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -34,19 +34,8 @@ datasize = self.size # if datasize < 0: - if (space.isinstance_w(w_init, space.w_list) or - 
space.isinstance_w(w_init, space.w_tuple)): - length = space.int_w(space.len(w_init)) - elif space.isinstance_w(w_init, space.w_basestring): - # from a string, we add the null terminator - length = space.int_w(space.len(w_init)) + 1 - else: - length = space.getindex_w(w_init, space.w_OverflowError) - if length < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative array length")) - w_init = space.w_None - # + from pypy.module._cffi_backend import misc + w_init, length = misc.get_new_array_length(space, w_init) try: datasize = ovfcheck(length * self.ctitem.size) except OverflowError: diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -2,27 +2,25 @@ Pointers. """ -from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror - from rpython.rlib import rposix from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.annlowlevel import llstr, llunicode from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw, copy_unicode_to_raw +from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror from pypy.module._cffi_backend import cdataobj, misc, ctypeprim, ctypevoid from pypy.module._cffi_backend.ctypeobj import W_CType class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] + _attrs_ = ['ctitem', 'can_cast_anything', 'length'] + _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length'] length = -1 def __init__(self, space, size, extra, extra_position, ctitem, could_cast_anything=True): - from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion name, name_position = ctitem.insert_name(extra, extra_position) 
W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -31,7 +29,6 @@ # - for functions, it is the return type self.ctitem = ctitem self.can_cast_anything = could_cast_anything and ctitem.cast_anything - self.is_struct_ptr = isinstance(ctitem, W_CTypeStructOrUnion) def is_char_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar) @@ -90,8 +87,7 @@ "initializer string is too long for '%s'" " (got %d characters)", self.name, n) - for i in range(n): - cdata[i] = s[i] + copy_string_to_raw(llstr(s), cdata, 0, n) if n != self.length: cdata[n] = '\x00' elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar): @@ -105,8 +101,7 @@ " (got %d characters)", self.name, n) unichardata = rffi.cast(rffi.CWCHARP, cdata) - for i in range(n): - unichardata[i] = s[i] + copy_unicode_to_raw(llunicode(s), unichardata, 0, n) if n != self.length: unichardata[n] = u'\x00' else: @@ -157,7 +152,6 @@ return cdataobj.W_CData(self.space, ptrdata, self) def convert_from_object(self, cdata, w_ob): - space = self.space if not isinstance(w_ob, cdataobj.W_CData): raise self._convert_error("cdata pointer", w_ob) other = w_ob.ctype @@ -197,6 +191,7 @@ W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init): + from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctitem = self.ctitem datasize = ctitem.size @@ -204,10 +199,15 @@ raise operationerrfmt(space.w_TypeError, "cannot instantiate ctype '%s' of unknown size", self.name) - if self.is_struct_ptr: + if isinstance(ctitem, W_CTypeStructOrUnion): # 'newp' on a struct-or-union pointer: in this case, we return # a W_CDataPtrToStruct object which has a strong reference # to a W_CDataNewOwning that really contains the structure. 
+ # + if ctitem.with_var_array and not space.is_w(w_init, space.w_None): + datasize = ctitem.convert_struct_from_object( + lltype.nullptr(rffi.CCHARP.TO), w_init, datasize) + # cdatastruct = cdataobj.W_CDataNewOwning(space, datasize, ctitem) cdata = cdataobj.W_CDataPtrToStructOrUnion(space, cdatastruct._cdata, @@ -238,11 +238,15 @@ def add(self, cdata, i): space = self.space ctitem = self.ctitem + itemsize = ctitem.size if ctitem.size < 0: - raise operationerrfmt(space.w_TypeError, + if self.is_void_ptr: + itemsize = 1 + else: + raise operationerrfmt(space.w_TypeError, "ctype '%s' points to items of unknown size", self.name) - p = rffi.ptradd(cdata, i * self.ctitem.size) + p = rffi.ptradd(cdata, i * itemsize) return cdataobj.W_CData(space, p, self) def cast(self, w_ob): @@ -298,7 +302,6 @@ def convert_argument_from_object(self, cdata, w_ob): from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag - space = self.space result = (not isinstance(w_ob, cdataobj.W_CData) and self._prepare_pointer_call_argument(w_ob, cdata)) if result == 0: @@ -320,7 +323,8 @@ space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)): + (isinstance(ctype2, W_CTypePtrOrArray) and + isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -9,7 +9,8 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, intmask -from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._cffi_backend import cdataobj, 
ctypeprim, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -17,12 +18,13 @@ class W_CTypeStructOrUnion(W_CType): _immutable_fields_ = ['alignment?', 'fields_list?', 'fields_dict?', - 'custom_field_pos?'] + 'custom_field_pos?', 'with_var_array?'] # fields added by complete_struct_or_union(): alignment = -1 fields_list = None fields_dict = None custom_field_pos = False + with_var_array = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) @@ -90,12 +92,13 @@ pass def convert_from_object(self, cdata, w_ob): - space = self.space - if self._copy_from_same(cdata, w_ob): - return + if not self._copy_from_same(cdata, w_ob): + self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) + def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) + space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) @@ -104,7 +107,9 @@ "too many initializers for '%s' (got %d)", self.name, len(lst_w)) for i in range(len(lst_w)): - self.fields_list[i].write(cdata, lst_w[i]) + optvarsize = self.fields_list[i].write_v(cdata, lst_w[i], + optvarsize) + return optvarsize elif space.isinstance_w(w_ob, space.w_dict): lst_w = space.fixedview(w_ob) @@ -116,11 +121,16 @@ except KeyError: space.raise_key_error(w_key) assert 0 - cf.write(cdata, space.getitem(w_ob, w_key)) + optvarsize = cf.write_v(cdata, space.getitem(w_ob, w_key), + optvarsize) + return optvarsize else: - raise self._convert_error("list or tuple or dict or struct-cdata", - w_ob) + if optvarsize == -1: + msg = "list or tuple or dict or struct-cdata" + else: + msg = "list or tuple or dict" + raise self._convert_error(msg, w_ob) @jit.elidable def _getcfield_const(self, attr): @@ -192,6 +202,37 @@ else: self.ctype.convert_from_object(cdata, w_ob) + def write_v(self, cdata, w_ob, optvarsize): + # a special case for var-sized C99 arrays + from 
pypy.module._cffi_backend import ctypearray + ct = self.ctype + if isinstance(ct, ctypearray.W_CTypeArray) and ct.length < 0: + space = ct.space + w_ob, varsizelength = misc.get_new_array_length(space, w_ob) + if optvarsize != -1: + # in this mode, the only purpose of this function is to compute + # the real size of the structure from a var-sized C99 array + assert cdata == lltype.nullptr(rffi.CCHARP.TO) + itemsize = ct.ctitem.size + try: + varsize = ovfcheck(itemsize * varsizelength) + size = ovfcheck(self.offset + varsize) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array size would overflow a ssize_t")) + assert size >= 0 + return max(size, optvarsize) + # if 'value' was only an integer, get_new_array_length() returns + # w_ob = space.w_None. Detect if this was the case, + # and if so, stop here, leaving the content uninitialized + # (it should be zero-initialized from somewhere else). + if space.is_w(w_ob, space.w_None): + return optvarsize + # + if optvarsize == -1: + self.write(cdata, w_ob) + return optvarsize + def convert_bitfield_to_object(self, cdata): ctype = self.ctype space = ctype.space diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -278,6 +278,22 @@ # ____________________________________________________________ +def get_new_array_length(space, w_value): + if (space.isinstance_w(w_value, space.w_list) or + space.isinstance_w(w_value, space.w_tuple)): + return (w_value, space.int_w(space.len(w_value))) + elif space.isinstance_w(w_value, space.w_basestring): + # from a string, we add the null terminator + return (w_value, space.int_w(space.len(w_value)) + 1) + else: + explicitlength = space.getindex_w(w_value, space.w_OverflowError) + if explicitlength < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative array length")) + return (space.w_None, explicitlength) + +# 
____________________________________________________________ + @specialize.arg(0) def _raw_memcopy_tp(TPP, source, dest): # in its own function: LONGLONG may make the whole function jit-opaque diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -158,8 +158,10 @@ fields_list = [] fields_dict = {} custom_field_pos = False + with_var_array = False - for w_field in fields_w: + for i in range(len(fields_w)): + w_field = fields_w[i] field_w = space.fixedview(w_field) if not (2 <= len(field_w) <= 4): raise OperationError(space.w_TypeError, @@ -176,7 +178,11 @@ "duplicate field name '%s'", fname) # if ftype.size < 0: - raise operationerrfmt(space.w_TypeError, + if (isinstance(ftype, ctypearray.W_CTypeArray) and fbitsize < 0 + and (i == len(fields_w) - 1 or foffset != -1)): + with_var_array = True + else: + raise operationerrfmt(space.w_TypeError, "field '%s.%s' has ctype '%s' of unknown size", w_ctype.name, fname, ftype.name) # @@ -235,7 +241,8 @@ fields_list.append(fld) fields_dict[fname] = fld - boffset += ftype.size * 8 + if ftype.size >= 0: + boffset += ftype.size * 8 prev_bitfield_size = 0 else: @@ -359,6 +366,7 @@ w_ctype.fields_list = fields_list w_ctype.fields_dict = fields_dict w_ctype.custom_field_pos = custom_field_pos + w_ctype.with_var_array = with_var_array # ____________________________________________________________ diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -542,6 +542,7 @@ assert repr(a) == "" % ( 3*5*size_of_int(),) assert repr(a + 0).startswith(" x[i]=y" if space.isinstance_w(w_idx, space.w_slice): @@ -869,9 +868,6 @@ self.buffer[i] = w_item.buffer[j] j += 1 - # We can't look into this function until ptradd works with things (in the - # JIT) 
other than rffi.CCHARP - @jit.dont_look_inside def delitem(self, space, i, j): if i < 0: i += self.len @@ -907,16 +903,23 @@ lltype.free(oldbuffer, flavor='raw') # Add and mul methods - def descr_add(self, space, w_other): if not isinstance(w_other, W_Array): return space.w_NotImplemented a = mytype.w_class(space) a.setlen(self.len + w_other.len, overallocate=False) - for i in range(self.len): - a.buffer[i] = self.buffer[i] - for i in range(w_other.len): - a.buffer[i + self.len] = w_other.buffer[i] + if self.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, a.buffer), + rffi.cast(rffi.VOIDP, self.buffer), + self.len * mytype.bytes + ) + if w_other.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(a.buffer, self.len)), + rffi.cast(rffi.VOIDP, w_other.buffer), + w_other.len * mytype.bytes + ) return a def descr_inplace_add(self, space, w_other): @@ -925,8 +928,12 @@ oldlen = self.len otherlen = w_other.len self.setlen(oldlen + otherlen) - for i in range(otherlen): - self.buffer[oldlen + i] = w_other.buffer[i] + if otherlen: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(self.buffer, oldlen)), + rffi.cast(rffi.VOIDP, w_other.buffer), + otherlen * mytype.bytes + ) return self def descr_mul(self, space, w_repeat): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -519,10 +519,13 @@ # by converting nonnative byte order. 
if self.is_scalar(): return space.wrap(0) - s = self.get_dtype().name - if not self.get_dtype().is_native(): - s = s[1:] - dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] + if not self.get_dtype().is_flexible_type(): + s = self.get_dtype().name + if not self.get_dtype().is_native(): + s = s[1:] + dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] + else: + dtype = self.get_dtype() contig = self.implementation.astype(space, dtype) return contig.argsort(space, w_axis) diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -12,8 +12,7 @@ exp = sorted(range(len(exp)), key=exp.__getitem__) c = a.copy() res = a.argsort() - assert (res == exp).all(), \ - 'a,res,dtype %r,%r,%r' % (a,res,dtype) + assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) assert (a == c).all() # not modified a = arange(100, dtype=dtype) @@ -60,11 +59,10 @@ for dtype in ['int', 'float', 'int16', 'float32', 'uint64', 'i2', complex]: a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype) - b = sorted(list(a)) - c = a.copy() - a.sort() - assert (a == b).all(), \ - 'a,orig,dtype %r,%r,%r' % (a,c,dtype) + exp = sorted(list(a)) + res = a.copy() + res.sort() + assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) a = arange(100, dtype=dtype) c = a.copy() @@ -85,7 +83,6 @@ #assert (a == b).all(), \ # 'a,orig,dtype %r,%r,%r' % (a,c,dtype) - # tests from numpy/tests/test_multiarray.py def test_sort_corner_cases(self): # test ordering for floats and complex containing nans. 
It is only @@ -307,7 +304,6 @@ assert (r == array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)).all() - # tests from numpy/tests/test_regression.py def test_sort_bigendian(self): skip('not implemented yet') @@ -325,3 +321,13 @@ y = fromstring("\x00\x01\x00\x02", dtype="S2") x.sort(kind='q') assert (x == y).all() + + def test_string_mergesort(self): + import numpypy as np + import sys + x = np.array(['a'] * 32) + if '__pypy__' in sys.builtin_module_names: + exc = raises(NotImplementedError, "x.argsort(kind='m')") + assert 'non-numeric types' in exc.value.message + else: + assert (x.argsort(kind='m') == np.arange(32)).all() diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -163,6 +163,8 @@ if step == 0: # index only return space.wrap(self.mmap.getitem(start)) elif step == 1: + if stop - start < 0: + return space.wrap("") return space.wrap(self.mmap.getslice(start, stop - start)) else: res = "".join([self.mmap.getitem(i) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -525,6 +525,8 @@ m = mmap(f.fileno(), 6) assert m[-3:7] == "bar" + assert m[1:0:1] == "" + f.close() def test_sequence_type(self): diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -131,6 +131,11 @@ if hasattr(os, 'fpathconf'): interpleveldefs['fpathconf'] = 'interp_posix.fpathconf' interpleveldefs['pathconf_names'] = 'space.wrap(os.pathconf_names)' + if hasattr(os, 'pathconf'): + interpleveldefs['pathconf'] = 'interp_posix.pathconf' + if hasattr(os, 'confstr'): + interpleveldefs['confstr'] = 'interp_posix.confstr' + interpleveldefs['confstr_names'] = 'space.wrap(os.confstr_names)' if hasattr(os, 'ttyname'): interpleveldefs['ttyname'] = 'interp_posix.ttyname' if 
hasattr(os, 'getloadavg'): @@ -155,7 +160,9 @@ for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', + 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp', + 'getresuid', 'getresgid', 'setresuid', 'setresgid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -987,7 +987,39 @@ Return list of supplemental group IDs for the process. """ - return space.newlist([space.wrap(e) for e in os.getgroups()]) + try: + list = os.getgroups() + except OSError, e: + raise wrap_oserror(space, e) + return space.newlist([space.wrap(e) for e in list]) + +def setgroups(space, w_list): + """ setgroups(list) + + Set the groups of the current process to list. + """ + list = [] + for w_gid in space.unpackiterable(w_list): + gid = space.int_w(w_gid) + check_uid_range(space, gid) + list.append(gid) + try: + os.setgroups(list[:]) + except OSError, e: + raise wrap_oserror(space, e) + + at unwrap_spec(username=str, gid=c_gid_t) +def initgroups(space, username, gid): + """ initgroups(username, gid) -> None + + Call the system initgroups() to initialize the group access list with all of + the groups of which the specified username is a member, plus the specified + group id. 
+ """ + try: + os.initgroups(username, gid) + except OSError, e: + raise wrap_oserror(space, e) def getpgrp(space): """ getpgrp() -> pgrp @@ -1089,6 +1121,77 @@ raise wrap_oserror(space, e) return space.w_None + at unwrap_spec(fd=c_int) +def tcgetpgrp(space, fd): + """ tcgetpgrp(fd) -> pgid + + Return the process group associated with the terminal given by a fd. + """ + try: + pgid = os.tcgetpgrp(fd) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(pgid) + + at unwrap_spec(fd=c_int, pgid=c_gid_t) +def tcsetpgrp(space, fd, pgid): + """ tcsetpgrp(fd, pgid) + + Set the process group associated with the terminal given by a fd. + """ + try: + os.tcsetpgrp(fd, pgid) + except OSError, e: + raise wrap_oserror(space, e) + +def getresuid(space): + """ getresuid() -> (ruid, euid, suid) + + Get tuple of the current process's real, effective, and saved user ids. + """ + try: + (ruid, euid, suid) = os.getresuid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(ruid), + space.wrap(euid), + space.wrap(suid)]) + +def getresgid(space): + """ getresgid() -> (rgid, egid, sgid) + + Get tuple of the current process's real, effective, and saved group ids. + """ + try: + (rgid, egid, sgid) = os.getresgid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(rgid), + space.wrap(egid), + space.wrap(sgid)]) + + at unwrap_spec(ruid=c_uid_t, euid=c_uid_t, suid=c_uid_t) +def setresuid(space, ruid, euid, suid): + """ setresuid(ruid, euid, suid) + + Set the current process's real, effective, and saved user ids. + """ + try: + os.setresuid(ruid, euid, suid) + except OSError, e: + raise wrap_oserror(space, e) + + at unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) +def setresgid(space, rgid, egid, sgid): + """ setresgid(rgid, egid, sgid) + + Set the current process's real, effective, and saved group ids. 
+ """ + try: + os.setresgid(rgid, egid, sgid) + except OSError, e: + raise wrap_oserror(space, e) + def declare_new_w_star(name): if name in RegisterOs.w_star_returning_int: @unwrap_spec(status=c_int) @@ -1130,15 +1233,37 @@ def sysconf(space, w_name): num = confname_w(space, w_name, os.sysconf_names) - return space.wrap(os.sysconf(num)) + try: + res = os.sysconf(num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) @unwrap_spec(fd=c_int) def fpathconf(space, fd, w_name): num = confname_w(space, w_name, os.pathconf_names) try: - return space.wrap(os.fpathconf(fd, num)) + res = os.fpathconf(fd, num) except OSError, e: raise wrap_oserror(space, e) + return space.wrap(res) + + at unwrap_spec(path='str0') +def pathconf(space, path, w_name): + num = confname_w(space, w_name, os.pathconf_names) + try: + res = os.pathconf(path, num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) + +def confstr(space, w_name): + num = confname_w(space, w_name, os.confstr_names) + try: + res = os.confstr(num) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(res) @unwrap_spec(path='str0', uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -78,6 +78,11 @@ cls.w_sysconf_name = space.wrap(sysconf_name) cls.w_sysconf_value = space.wrap(os.sysconf_names[sysconf_name]) cls.w_sysconf_result = space.wrap(os.sysconf(sysconf_name)) + if hasattr(os, 'confstr'): + confstr_name = os.confstr_names.keys()[0] + cls.w_confstr_name = space.wrap(confstr_name) + cls.w_confstr_value = space.wrap(os.confstr_names[confstr_name]) + cls.w_confstr_result = space.wrap(os.confstr(confstr_name)) cls.w_SIGABRT = space.wrap(signal.SIGABRT) cls.w_python = space.wrap(sys.executable) if hasattr(os, 'major'): @@ -616,6 +621,30 @@ os = self.posix assert 
os.getgroups() == self.getgroups + if hasattr(os, 'setgroups'): + def test_os_setgroups(self): + os = self.posix + raises(TypeError, os.setgroups, [2, 5, "hello"]) + try: + os.setgroups(os.getgroups()) + except OSError: + pass + + if hasattr(os, 'initgroups'): + def test_os_initgroups(self): + os = self.posix + raises(OSError, os.initgroups, "crW2hTQC", 100) + + if hasattr(os, 'tcgetpgrp'): + def test_os_tcgetpgrp(self): + os = self.posix + raises(OSError, os.tcgetpgrp, 9999) + + if hasattr(os, 'tcsetpgrp'): + def test_os_tcsetpgrp(self): + os = self.posix + raises(OSError, os.tcsetpgrp, 9999, 1) + if hasattr(os, 'getpgid'): def test_os_getpgid(self): os = self.posix @@ -634,6 +663,30 @@ assert os.getsid(0) == self.getsid0 raises(OSError, os.getsid, -100000) + if hasattr(os, 'getresuid'): + def test_os_getresuid(self): + os = self.posix + res = os.getresuid() + assert len(res) == 3 + + if hasattr(os, 'getresgid'): + def test_os_getresgid(self): + os = self.posix + res = os.getresgid() + assert len(res) == 3 + + if hasattr(os, 'setresuid'): + def test_os_setresuid(self): + os = self.posix + a, b, c = os.getresuid() + os.setresuid(a, b, c) + + if hasattr(os, 'setresgid'): + def test_os_setresgid(self): + os = self.posix + a, b, c = os.getresgid() + os.setresgid(a, b, c) + if hasattr(os, 'sysconf'): def test_os_sysconf(self): os = self.posix @@ -652,6 +705,25 @@ raises(OSError, os.fpathconf, -1, "PC_PIPE_BUF") raises(ValueError, os.fpathconf, 1, "##") + if hasattr(os, 'pathconf'): + def test_os_pathconf(self): + os = self.posix + assert os.pathconf("/tmp", "PC_NAME_MAX") >= 31 + # Linux: the following gets 'No such file or directory' + raises(OSError, os.pathconf, "", "PC_PIPE_BUF") + raises(ValueError, os.pathconf, "/tmp", "##") + + if hasattr(os, 'confstr'): + def test_os_confstr(self): + os = self.posix + assert os.confstr(self.confstr_value) == self.confstr_result + assert os.confstr(self.confstr_name) == self.confstr_result + assert 
os.confstr_names[self.confstr_name] == self.confstr_value + + def test_os_confstr_error(self): + os = self.posix + raises(ValueError, os.confstr, "!@#$%!#$!@#") + if hasattr(os, 'wait'): def test_os_wait(self): os = self.posix diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -228,7 +228,7 @@ except ImportError: sys.stderr.write('SKIP: cannot import cffi\n') return 0 - + ffi = cffi.FFI() ffi.cdef(""" @@ -301,5 +301,30 @@ f(1) # libm_name = get_libm_name(sys.platform) - log = self.run(main, [libm_name]) + self.run(main, [libm_name]) # assert did not crash + + def test_cffi_init_struct_with_list(self): + def main(n): + import sys + try: + import cffi + except ImportError: + sys.stderr.write('SKIP: cannot import cffi\n') + return 0 + + ffi = cffi.FFI() + ffi.cdef(""" + struct s { + int x; + int y; + int z; + }; + """) + + for i in xrange(n): + ffi.new("struct s *", [i, i, i]) + + log = self.run(main, [300]) + loop, = log.loops_by_filename(self.filepath) + assert False, "XXX: fill this in" diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -568,8 +568,6 @@ def _create_new_type(space, w_typetype, w_name, w_bases, w_dict): # this is in its own function because we want the special case 'type(x)' # above to be seen by the jit. 
- from pypy.objspace.std.typeobject import W_TypeObject - if w_bases is None or w_dict is None: raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments")) @@ -611,7 +609,6 @@ return w_type def _precheck_for_new(space, w_type): - from pypy.objspace.std.typeobject import W_TypeObject if not isinstance(w_type, W_TypeObject): raise operationerrfmt(space.w_TypeError, "X is not a type object (%T)", w_type) @@ -620,7 +617,6 @@ # ____________________________________________________________ def _check(space, w_type, w_msg=None): - from pypy.objspace.std.typeobject import W_TypeObject if not isinstance(w_type, W_TypeObject): if w_msg is None: w_msg = space.wrap("descriptor is for 'type'") @@ -653,7 +649,6 @@ return space.newtuple(w_type.bases_w) def mro_subclasses(space, w_type, temp): - from pypy.objspace.std.typeobject import W_TypeObject, compute_mro temp.append((w_type, w_type.mro_w)) compute_mro(w_type) for w_sc in w_type.get_subclasses(): @@ -662,9 +657,6 @@ def descr_set__bases__(space, w_type, w_value): # this assumes all app-level type objects are W_TypeObject - from pypy.objspace.std.typeobject import (W_TypeObject, get_parent_layout, - check_and_find_best_base, is_mro_purely_of_types) - w_type = _check(space, w_type) if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, @@ -728,7 +720,6 @@ assert w_type.w_same_layout_as is get_parent_layout(w_type) # invariant def descr__base(space, w_type): - from pypy.objspace.std.typeobject import find_best_base w_type = _check(space, w_type) return find_best_base(space, w_type.bases_w) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -304,8 +304,12 @@ except IndexError: return None if hint in self.DONT_MOVE_GPR: - self.ARGUMENTS_GPR[i] = hint - res = hint + for j in range(i): + if hint is self.ARGUMENTS_GPR[j]: + break + else: + self.ARGUMENTS_GPR[i] 
= hint + res = hint return res def _unused_xmm(self): diff --git a/rpython/jit/backend/x86/test/test_callbuilder.py b/rpython/jit/backend/x86/test/test_callbuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_callbuilder.py @@ -0,0 +1,33 @@ +from rpython.jit.backend.x86 import callbuilder +from rpython.jit.backend.x86.regloc import esi, edi, ebx, ecx, ImmedLoc + + +class FakeAssembler: + mc = None + class _regalloc: + class rm: + free_regs = [ebx] + + def __init__(self): + self._log = [] + + def _is_asmgcc(self): + return False + + def regalloc_mov(self, src, dst): + self._log.append(('mov', src, dst)) + + +def test_base_case(): + asm = FakeAssembler() + cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx]) + cb.prepare_arguments() + assert asm._log == [('mov', ebx, edi), + ('mov', ebx, esi)] + +def test_bug_call_release_gil(): + asm = FakeAssembler() + cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx]) + cb.select_call_release_gil_mode() + cb.prepare_arguments() + assert asm._log == [('mov', ebx, ecx)] diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1162,10 +1162,19 @@ v_result) def rewrite_op_direct_ptradd(self, op): - # xxx otherwise, not implemented: - assert op.args[0].concretetype == rffi.CCHARP + v_shift = op.args[1] + assert v_shift.concretetype == lltype.Signed + ops = [] # - return SpaceOperation('int_add', [op.args[0], op.args[1]], op.result) + if op.args[0].concretetype != rffi.CCHARP: + v_prod = varoftype(lltype.Signed) + by = llmemory.sizeof(op.args[0].concretetype.TO.OF) + c_by = Constant(by, lltype.Signed) + ops.append(SpaceOperation('int_mul', [v_shift, c_by], v_prod)) + v_shift = v_prod + # + ops.append(SpaceOperation('int_add', [op.args[0], v_shift], op.result)) + return ops # ---------- # Long longs, for 32-bit only. 
Supported operations are left unmodified, diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -993,6 +993,16 @@ int_return %i2 """, transform=True) + def test_direct_ptradd_2(self): + def f(p, n): + return lltype.direct_ptradd(p, n + 2) + self.encoding_test(f, [lltype.nullptr(rffi.SHORTP.TO), 123], """ + int_add %i1, $2 -> %i2 + int_mul %i2, $ 1> -> %i3 + int_add %i0, %i3 -> %i4 + int_return %i4 + """, transform=True) + def test_convert_float_bytes(self): from rpython.rlib.longlong2float import float2longlong, longlong2float def f(x): diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -514,7 +514,8 @@ # if metainterp_sd.warmrunnerdesc is not None: # for tests jitcounter = metainterp_sd.warmrunnerdesc.jitcounter - self.status = jitcounter.fetch_next_index() << self.ST_SHIFT + index = jitcounter.in_second_half(jitcounter.fetch_next_index()) + self.status = index << self.ST_SHIFT def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE @@ -598,7 +599,7 @@ hash = (current_object_addr_as_int(self) * 777767777 + intval * 1442968193) - index = jitcounter.get_index(hash) + index = jitcounter.in_second_half(jitcounter.get_index(hash)) # increment = jitdriver_sd.warmstate.increment_trace_eagerness return jitcounter.tick(index, increment) diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -18,11 +18,20 @@ while (UINT32MAX >> self.shift) != size - 1: self.shift += 1 assert self.shift < 999, "size is not a power of two <= 2**31" - self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), size, + # + # The table of timings. 
The first half is used for starting the + # compilation of new loops. The second half is used for turning + # failing guards into bridges. The two halves are split to avoid + # too much interference. + self.timetablesize = size * 2 + self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), + self.timetablesize, flavor='raw', zero=True, track_allocation=False) + self._nextindex = r_uint(0) + # + # The table of JitCell entries, recording already-compiled loops self.celltable = [None] * size - self._nextindex = r_uint(0) # if translator is not None: class Glob: @@ -61,6 +70,10 @@ self._nextindex = (result + 1) & self.get_index(-1) return result + def in_second_half(self, index): + assert index < r_uint(self.size) + return self.size + index + def tick(self, index, increment): counter = float(self.timetable[index]) + increment if counter < 1.0: @@ -112,7 +125,7 @@ # important in corner cases where we would suddenly compile more # than one loop because all counters reach the bound at the same # time, but where compiling all but the first one is pointless. 
- size = self.size + size = self.timetablesize pypy__decay_jit_counters(self.timetable, self.decay_by_mult, size) @@ -152,6 +165,10 @@ "NOT_RPYTHON" pass + def in_second_half(self, index): + "NOT_RPYTHON" + return index + 12345 + def _clear_all(self): self.timetable.clear() self.celltable.clear() diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -96,8 +96,13 @@ idx += 1 def _escape(self, box): - if box in self.new_boxes: - self.new_boxes[box] = False + try: + unescaped = self.new_boxes[box] + except KeyError: + pass + else: + if unescaped: + self.new_boxes[box] = False try: deps = self.dependencies.pop(box) except KeyError: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5125,18 +5125,39 @@ def test_str_copy_virtual_src_concrete_dst(self): ops = """ [p0] - p1 = newstr(1) + p1 = newstr(2) strsetitem(p1, 0, 101) - copystrcontent(p1, p0, 0, 0, 1) + strsetitem(p1, 1, 102) + copystrcontent(p1, p0, 0, 0, 2) finish(p0) """ expected = """ [p0] strsetitem(p0, 0, 101) + strsetitem(p0, 1, 102) finish(p0) """ self.optimize_strunicode_loop(ops, expected) + def test_str_copy_bug1(self): + ops = """ + [i0] + p1 = newstr(1) + strsetitem(p1, 0, i0) + p2 = newstr(1) + escape(p2) + copystrcontent(p1, p2, 0, 0, 1) + finish() + """ + expected = """ + [i0] + p2 = newstr(1) + escape(p2) + strsetitem(p2, 0, i0) + finish() + """ + self.optimize_strunicode_loop(ops, expected) + def test_call_pure_vstring_const(self): py.test.skip("implement me") ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ 
b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -154,6 +154,7 @@ return self._chars[index] # may return None! def setitem(self, index, charvalue): + assert self.is_virtual() assert isinstance(charvalue, optimizer.OptValue) assert self._chars[index] is None, ( "setitem() on an already-initialized location") @@ -512,29 +513,28 @@ srcstart = self.getvalue(op.getarg(2)) dststart = self.getvalue(op.getarg(3)) length = self.getvalue(op.getarg(4)) + dst_virtual = (isinstance(dst, VStringPlainValue) and dst.is_virtual()) if length.is_constant() and length.box.getint() == 0: return elif ((src.is_virtual() or src.is_constant()) and srcstart.is_constant() and dststart.is_constant() and - length.is_constant()): + length.is_constant() and + (length.force_box(self).getint() < 20 or (src.is_virtual() and dst_virtual))): src_start = srcstart.force_box(self).getint() dst_start = dststart.force_box(self).getint() - # 'length' must be <= MAX_CONST_LEN here, because 'dst' is a - # VStringPlainValue, which is limited to MAX_CONST_LEN. 
actual_length = length.force_box(self).getint() - assert actual_length <= MAX_CONST_LEN for index in range(actual_length): vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode) - if isinstance(dst, VStringPlainValue): + if dst_virtual: dst.setitem(index + dst_start, vresult) else: - op = ResOperation(mode.STRSETITEM, [ - op.getarg(1), + new_op = ResOperation(mode.STRSETITEM, [ + dst.force_box(self), ConstInt(index + dst_start), vresult.force_box(self), ], None) - self.emit_operation(op) + self.emit_operation(new_op) else: copy_str_content(self, src.force_box(self), diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -371,6 +371,7 @@ self._grow(times) def append_charpsize(self, s, size): + assert size >= 0 l = [] for i in xrange(size): l.append(s[i]) diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -8,10 +8,10 @@ def mallocbytearray(size): return lltype.malloc(BYTEARRAY, size) -_, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, +_, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, lltype.Char, 'bytearray') -_, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, +_, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, BYTEARRAY, lltype.Char, 'bytearray_from_str') diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -129,10 +129,6 @@ ('tms_cutime', rffi.INT), ('tms_cstime', rffi.INT)]) - GID_T = platform.SimpleType('gid_t', rffi.INT) - #TODO right now is used only in getgroups, may need to update other - #functions like setgid - # For now we require off_t to be the same size as LONGLONG, which is the # interface 
required by callers of functions that thake an argument of type # off_t @@ -655,6 +651,46 @@ return extdef([int, int], int, "ll_os.ll_fpathconf", llimpl=fpathconf_llimpl) + @registering_if(os, 'pathconf') + def register_os_pathconf(self): + c_pathconf = self.llexternal('pathconf', + [rffi.CCHARP, rffi.INT], rffi.LONG) + + def pathconf_llimpl(path, i): + rposix.set_errno(0) + res = c_pathconf(path, i) + if res == -1: + errno = rposix.get_errno() + if errno != 0: + raise OSError(errno, "pathconf failed") + return res + return extdef([str0, int], int, "ll_os.ll_pathconf", + llimpl=pathconf_llimpl) + + @registering_if(os, 'confstr') + def register_os_confstr(self): + c_confstr = self.llexternal('confstr', [rffi.INT, rffi.CCHARP, + rffi.SIZE_T], rffi.SIZE_T) + + def confstr_llimpl(i): + rposix.set_errno(0) + n = c_confstr(i, lltype.nullptr(rffi.CCHARP.TO), 0) + n = rffi.cast(lltype.Signed, n) + if n > 0: + buf = lltype.malloc(rffi.CCHARP.TO, n, flavor='raw') + try: + c_confstr(i, buf, n) + return rffi.charp2strn(buf, n) + finally: + lltype.free(buf, flavor='raw') + else: + errno = rposix.get_errno() + if errno != 0: + raise OSError(errno, "confstr failed") + return None + return extdef([int], SomeString(can_be_None=True), + "ll_os.ll_confstr", llimpl=confstr_llimpl) + @registering_if(os, 'getuid') def register_os_getuid(self): return self.extdef_for_os_function_returning_int('getuid') @@ -693,7 +729,7 @@ @registering_if(os, 'getgroups') def register_os_getgroups(self): - GP = rffi.CArrayPtr(self.GID_T) + GP = rffi.CArrayPtr(rffi.PID_T) c_getgroups = self.llexternal('getgroups', [rffi.INT, GP], rffi.INT) def getgroups_llimpl(): @@ -702,16 +738,50 @@ groups = lltype.malloc(GP.TO, n, flavor='raw') try: n = c_getgroups(n, groups) - result = [groups[i] for i in range(n)] + result = [rffi.cast(lltype.Signed, groups[i]) + for i in range(n)] finally: lltype.free(groups, flavor='raw') if n >= 0: return result raise OSError(rposix.get_errno(), "os_getgroups failed") - return 
extdef([], [self.GID_T], llimpl=getgroups_llimpl, + return extdef([], [int], llimpl=getgroups_llimpl, export_name="ll_os.ll_getgroups") + @registering_if(os, 'setgroups') + def register_os_setgroups(self): + GP = rffi.CArrayPtr(rffi.PID_T) + c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT) + + def setgroups_llimpl(list): + n = len(list) + groups = lltype.malloc(GP.TO, n, flavor='raw') + try: + for i in range(n): + groups[i] = rffi.cast(rffi.PID_T, list[i]) + n = c_setgroups(rffi.cast(rffi.SIZE_T, n), groups) + finally: + lltype.free(groups, flavor='raw') + if n != 0: + raise OSError(rposix.get_errno(), "os_setgroups failed") + + return extdef([[int]], None, llimpl=setgroups_llimpl, + export_name="ll_os.ll_setgroups") + + @registering_if(os, 'initgroups') + def register_os_initgroups(self): + c_initgroups = self.llexternal('initgroups', + [rffi.CCHARP, rffi.PID_T], rffi.INT) + + def initgroups_llimpl(user, group): + n = c_initgroups(user, rffi.cast(rffi.PID_T, group)) + if n != 0: + raise OSError(rposix.get_errno(), "os_initgroups failed") + + return extdef([str, int], None, llimpl=initgroups_llimpl, + export_name="ll_os.ll_initgroups") + @registering_if(os, 'getpgrp') def register_os_getpgrp(self): name = 'getpgrp' @@ -747,6 +817,35 @@ else: return self.extdef_for_os_function_accepting_0int(name) + @registering_if(os, 'tcgetpgrp') + def register_os_tcgetpgrp(self): + c_tcgetpgrp = self.llexternal('tcgetpgrp', [rffi.INT], rffi.PID_T) + + def c_tcgetpgrp_llimpl(fd): + res = c_tcgetpgrp(rffi.cast(rffi.INT, fd)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "tcgetpgrp failed") + return res + + return extdef([int], int, llimpl=c_tcgetpgrp_llimpl, + export_name='ll_os.ll_os_tcgetpgrp') + + @registering_if(os, 'tcsetpgrp') + def register_os_tcsetpgrp(self): + c_tcsetpgrp = self.llexternal('tcsetpgrp', [rffi.INT, rffi.PID_T], + rffi.INT) + + def c_tcsetpgrp_llimpl(fd, pgrp): + res = 
c_tcsetpgrp(rffi.cast(rffi.INT, fd), + rffi.cast(rffi.PID_T, pgrp)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "tcsetpgrp failed") + + return extdef([int, int], None, llimpl=c_tcsetpgrp_llimpl, + export_name='ll_os.ll_os_tcsetpgrp') + @registering_if(os, 'getppid') def register_os_getppid(self): return self.extdef_for_os_function_returning_int('getppid') @@ -775,6 +874,76 @@ def register_os_setsid(self): return self.extdef_for_os_function_returning_int('setsid') + @registering_if(os, 'getresuid') + def register_os_getresuid(self): + c_getresuid = self.llexternal('getresuid', [rffi.INTP] * 3, rffi.INT) + + def c_getresuid_llimpl(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + res = c_getresuid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "getresuid failed") + return (rffi.cast(lltype.Signed, out[0]), + rffi.cast(lltype.Signed, out[1]), + rffi.cast(lltype.Signed, out[2])) + finally: + lltype.free(out, flavor='raw') + + return extdef([], (int, int, int), llimpl=c_getresuid_llimpl, + export_name='ll_os.ll_os_getresuid') + + @registering_if(os, 'getresgid') + def register_os_getresgid(self): + c_getresgid = self.llexternal('getresgid', [rffi.INTP] * 3, rffi.INT) + + def c_getresgid_llimpl(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + res = c_getresgid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2)) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "getresgid failed") + return (rffi.cast(lltype.Signed, out[0]), + rffi.cast(lltype.Signed, out[1]), + rffi.cast(lltype.Signed, out[2])) + finally: + lltype.free(out, flavor='raw') + + return extdef([], (int, int, int), llimpl=c_getresgid_llimpl, + export_name='ll_os.ll_os_getresgid') + + @registering_if(os, 'setresuid') + def register_os_setresuid(self): + c_setresuid = 
self.llexternal('setresuid', [rffi.INT] * 3, rffi.INT) + + def c_setresuid_llimpl(ruid, euid, suid): + res = c_setresuid(ruid, euid, suid) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "setresuid failed") + + return extdef([int, int, int], None, llimpl=c_setresuid_llimpl, + export_name='ll_os.ll_os_setresuid') + + @registering_if(os, 'setresgid') + def register_os_setresgid(self): + c_setresgid = self.llexternal('setresgid', [rffi.INT] * 3, rffi.INT) + + def c_setresgid_llimpl(rgid, egid, sgid): + res = c_setresgid(rgid, egid, sgid) + res = rffi.cast(lltype.Signed, res) + if res == -1: + raise OSError(rposix.get_errno(), "setresgid failed") + + return extdef([int, int, int], None, llimpl=c_setresgid_llimpl, + export_name='ll_os.ll_os_setresgid') + @registering_str_unicode(os.open) def register_os_open(self, traits): os_open = self.llexternal(traits.posix_function_name('open'), diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import hlstr from rpython.tool.udir import udir from rpython.rlib.rarithmetic import is_valid_int @@ -176,6 +177,27 @@ return os.sysconf(i) assert self.interpret(f, [13]) == f(13) + if hasattr(os, 'confstr'): + def test_os_confstr(self): + def f(i): + try: + return os.confstr(i) + except OSError: + return "oooops!!" + some_value = os.confstr_names.values()[-1] + res = self.interpret(f, [some_value]) + assert hlstr(res) == f(some_value) + res = self.interpret(f, [94781413]) + assert hlstr(res) == "oooops!!" 
+ + if hasattr(os, 'pathconf'): + def test_os_pathconf(self): + def f(i): + return os.pathconf("/tmp", i) + i = os.pathconf_names["PC_NAME_MAX"] + some_value = self.interpret(f, [i]) + assert some_value >= 31 + if hasattr(os, 'chroot'): def test_os_chroot(self): def f(): @@ -205,3 +227,78 @@ return os.getgroups() ll_a = self.interpret(f, []) assert self.ll_to_list(ll_a) == f() + + if hasattr(os, 'setgroups'): + def test_setgroups(self): + def f(): + try: + os.setgroups(os.getgroups()) + except OSError: + pass + self.interpret(f, []) + + if hasattr(os, 'initgroups'): + def test_initgroups(self): + def f(): + try: + os.initgroups('sUJJeumz', 4321) + except OSError: + return 1 + return 0 + res = self.interpret(f, []) + assert res == 1 + + if hasattr(os, 'tcgetpgrp'): + def test_tcgetpgrp(self): + def f(fd): + try: + return os.tcgetpgrp(fd) + except OSError: + return 42 + res = self.interpret(f, [9999]) + assert res == 42 + + if hasattr(os, 'tcsetpgrp'): + def test_tcsetpgrp(self): + def f(fd, pgrp): + try: + os.tcsetpgrp(fd, pgrp) + except OSError: + return 1 + return 0 + res = self.interpret(f, [9999, 1]) + assert res == 1 + + if hasattr(os, 'getresuid'): + def test_getresuid(self): + def f(): + a, b, c = os.getresuid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresuid() + assert res == a + b * 37 + c * 1291 + + if hasattr(os, 'getresgid'): + def test_getresgid(self): + def f(): + a, b, c = os.getresgid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresgid() + assert res == a + b * 37 + c * 1291 + + if hasattr(os, 'setresuid'): + def test_setresuid(self): + def f(): + a, b, c = os.getresuid() + a = (a + 1) - 1 + os.setresuid(a, b, c) + self.interpret(f, []) + + if hasattr(os, 'setresgid'): + def test_setresgid(self): + def f(): + a, b, c = os.getresgid() + a = (a + 1) - 1 + os.setresgid(a, b, c) + self.interpret(f, []) From noreply at buildbot.pypy.org Sat Nov 9 22:11:48 2013 From: noreply at 
buildbot.pypy.org (mattip) Date: Sat, 9 Nov 2013 22:11:48 +0100 (CET) Subject: [pypy-commit] pypy cpyext-int: close branch about to be merged Message-ID: <20131109211148.1116A1C030D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: cpyext-int Changeset: r67910:2539263da790 Date: 2013-11-09 23:09 +0200 http://bitbucket.org/pypy/pypy/changeset/2539263da790/ Log: close branch about to be merged diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -91,7 +91,7 @@ .. branch: safe-win-mmap .. branch: boolean-indexing-cleanup .. branch: cpyext-best_base -.. branch: cpyext-int +.. branch: cpyext-int .. branch: fileops2 .. branch: nobold-backtrace From noreply at buildbot.pypy.org Sat Nov 9 22:11:49 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 9 Nov 2013 22:11:49 +0100 (CET) Subject: [pypy-commit] pypy default: merge cpyext-int, which fixes calling PyInt_FromLong Message-ID: <20131109211149.5C77A1C0930@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r67911:5038b9a315c9 Date: 2013-11-09 23:10 +0200 http://bitbucket.org/pypy/pypy/changeset/5038b9a315c9/ Log: merge cpyext-int, which fixes calling PyInt_FromLong diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -91,6 +91,7 @@ .. branch: safe-win-mmap .. branch: boolean-indexing-cleanup .. branch: cpyext-best_base +.. branch: cpyext-int .. branch: fileops2 .. branch: nobold-backtrace diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -21,8 +21,17 @@ "Type description of PyIntObject" make_typedescr(space.w_int.instancetypedef, basestruct=PyIntObject.TO, + attach=int_attach, realize=int_realize) +def int_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyIntObject with the given int object. 
The + value must not be modified. + """ + py_int = rffi.cast(PyIntObject, py_obj) + py_int.c_ob_ival = space.int_w(w_obj) + def int_realize(space, obj): intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival) w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -97,7 +97,7 @@ return (PyObject *)enumObj; """), - ], + ], prologue=""" typedef struct { @@ -166,3 +166,24 @@ assert isinstance(a, int) assert a == int(a) == 42 assert a.name == "ULTIMATE_ANSWER" + + def test_int_cast(self): + mod = self.import_extension('foo', [ + #prove it works for ints + ("test_int", "METH_NOARGS", + """ + PyObject * obj = PyInt_FromLong(42); + if (!PyInt_Check(obj)) { + Py_DECREF(obj); + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + PyObject * val = PyInt_FromLong(((PyIntObject *)obj)->ob_ival); + Py_DECREF(obj); + return val; + """ + ), + ]) + i = mod.test_int() + assert isinstance(i, int) + assert i == 42 diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -1,5 +1,3 @@ -import py - from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype @@ -286,3 +284,5 @@ arr = mod.test_FromObject() dt = mod.test_DescrFromType(11) assert dt.num == 11 + + From noreply at buildbot.pypy.org Sat Nov 9 23:05:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 23:05:36 +0100 (CET) Subject: [pypy-commit] pypy default: Import cffi/d852277c4508 Message-ID: <20131109220536.6EC6F1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67912:d94f6f211aa5 Date: 
2013-11-09 23:04 +0100 http://bitbucket.org/pypy/pypy/changeset/d94f6f211aa5/ Log: Import cffi/d852277c4508 diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,49 @@ library we only look for the actual (untyped) symbols. 
""" assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + if hasattr(type, 'as_function_pointer'): + really_a_function_type = True + type = type.as_function_pointer() + else: + really_a_function_type = False + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. 
""" if isinstance(cdecl, basestring): @@ -144,6 +166,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +306,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -322,7 +351,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +372,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +404,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +436,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +482,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -192,10 +192,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +212,10 @@ self.item = item self.length = length # - if self.length is None: + if length is None or length == '...': brackets = '&[]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +223,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . 
import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +252,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +304,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +329,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if 
bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +352,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . import ffiplatform @@ -361,19 +360,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +385,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . 
import ffiplatform diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -280,8 +280,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +464,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +494,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # 
use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +531,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +570,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +601,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -677,15 +690,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +708,29 @@ _loading_cpy_variable = _loaded_noop 
def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. 
kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, 
model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,16 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: value += (1 << (8*self.ffi.sizeof("long long"))) else: - BFunc = self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -431,10 +439,11 @@ tp.enumvalues = tuple(enumvalues) 
tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +474,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: @@ -476,6 +493,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +518,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -185,3 +185,13 @@ ffi.cdef("typedef struct { float x; } foo_t;") p = ffi.new("foo_t *", [5.2]) assert repr(p).startswith(">), , False>' def test_remove_comments(): @@ -191,7 +192,7 @@ def test_parse_error(): ffi = FFI() e = py.test.raises(CDefError, ffi.cdef, " x y z ") - assert re.match(r'cannot parse " x y z "\n:\d+:', str(e.value)) + assert re.match(r'cannot parse "x y z"\n:\d+:', str(e.value)) def test_cannot_declare_enum_later(): ffi = FFI() @@ -256,3 +257,27 @@ py.test.skip("Only for Windows") ffi = FFI() ffi.cdef("void f(WPARAM);") + +def test__is_constant_globalvar(): + from cffi.cparser import Parser, _get_parser + for input, expected_output in [ + ("int a;", False), + ("const int a;", True), + ("int *a;", False), + ("const int *a;", False), + ("int const *a;", False), + ("int *const a;", True), + ("int a[5];", False), + ("const int a[5];", False), + ("int *a[5];", False), + ("const int *a[5];", False), + ("int const *a[5];", False), + ("int *const a[5];", False), + ("int a[5][6];", False), + ("const int a[5][6];", False), + ]: + p = Parser() + ast = _get_parser().parse(input) + decl = ast.children()[0][1] + node = decl.type + assert p._is_constant_globalvar(node) == expected_output diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -7,17 +7,20 @@ if sys.platform == 'win32': pass # no obvious -Werror equivalent on MSVC -elif (sys.platform == 'darwin' and - 
[int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): - pass # recent MacOSX come with clang by default, and passing some - # flags from the interpreter (-mno-fused-madd) generates a - # warning --- which is interpreted as an error with -Werror else: - # assume a standard GCC + if (sys.platform == 'darwin' and + [int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): + # special things for clang + extra_compile_args = [ + '-Werror', '-Qunused-arguments', '-Wno-error=shorten-64-to-32'] + else: + # assume a standard gcc + extra_compile_args = ['-Werror'] + class FFI(FFI): def verify(self, *args, **kwds): return super(FFI, self).verify( - *args, extra_compile_args=['-Werror'], **kwds) + *args, extra_compile_args=extra_compile_args, **kwds) def setup_module(): import cffi.verifier @@ -477,32 +480,71 @@ s = ffi.new("struct foo_s *") assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') -def test_struct_array_guess_length(): +def test_struct_array_no_length(): ffi = FFI() - ffi.cdef("struct foo_s { int a[]; ...; };") # <= no declared length - ffi.verify("struct foo_s { int x; int a[17]; int y; };") - assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') - s = ffi.new("struct foo_s *") - assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') - -def test_struct_array_guess_length_2(): - ffi = FFI() - ffi.cdef("struct foo_s { int a[]; ...; };\n" # <= no declared length + ffi.cdef("struct foo_s { int a[]; int y; ...; };\n" "int bar(struct foo_s *);\n") lib = ffi.verify("struct foo_s { int x; int a[17]; int y; };\n" "int bar(struct foo_s *f) { return f->a[14]; }\n") assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') s = ffi.new("struct foo_s *") + assert ffi.typeof(s.a) is ffi.typeof('int *') # because no length s.a[14] = 4242 assert lib.bar(s) == 4242 + # with no declared length, out-of-bound accesses are not detected + s.a[17] = -521 + assert s.y == s.a[17] == -521 + # + s = ffi.new("struct foo_s *", {'a': list(range(17))}) + assert s.a[16] == 16 + # overflows 
at construction time not detected either + s = ffi.new("struct foo_s *", {'a': list(range(18))}) + assert s.y == s.a[17] == 17 -def test_struct_array_guess_length_3(): +def test_struct_array_guess_length(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; };") ffi.verify("struct foo_s { int x; int a[17]; int y; };") assert ffi.sizeof('struct foo_s') == 19 * ffi.sizeof('int') s = ffi.new("struct foo_s *") assert ffi.sizeof(s.a) == 17 * ffi.sizeof('int') + py.test.raises(IndexError, 's.a[17]') + +def test_struct_array_c99_1(): + if sys.platform == 'win32': + py.test.skip("requires C99") + ffi = FFI() + ffi.cdef("struct foo_s { int x; int a[]; };") + ffi.verify("struct foo_s { int x; int a[]; };") + assert ffi.sizeof('struct foo_s') == 1 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242, 4]) + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') # the same in C + assert s.a[3] == 0 + s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') + assert s.a[3] == -10 + s = ffi.new("struct foo_s *") + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242]) + assert ffi.sizeof(s[0]) == 1 * ffi.sizeof('int') + +def test_struct_array_c99_2(): + if sys.platform == 'win32': + py.test.skip("requires C99") + ffi = FFI() + ffi.cdef("struct foo_s { int x; int a[]; ...; };") + ffi.verify("struct foo_s { int x, y; int a[]; };") + assert ffi.sizeof('struct foo_s') == 2 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242, 4]) + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') + assert s.a[3] == 0 + s = ffi.new("struct foo_s *", [424242, [-40, -30, -20, -10]]) + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') + assert s.a[3] == -10 + s = ffi.new("struct foo_s *") + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') + s = ffi.new("struct foo_s *", [424242]) + assert ffi.sizeof(s[0]) == 2 * ffi.sizeof('int') def test_struct_ptr_to_array_field(): ffi = FFI() @@ -614,6 +656,21 @@ s.x = 17 
assert s.x == 17 +def test_anonymous_enum(): + ffi = FFI() + ffi.cdef("enum { EE1 }; enum { EE2, EE3 };") + lib = ffi.verify("enum { EE1 }; enum { EE2, EE3 };") + assert lib.EE1 == 0 + assert lib.EE2 == 0 + assert lib.EE3 == 1 + +def test_nonfull_anonymous_enum(): + ffi = FFI() + ffi.cdef("enum { EE1, ... }; enum { EE3, ... };") + lib = ffi.verify("enum { EE2, EE1 }; enum { EE3 };") + assert lib.EE1 == 1 + assert lib.EE3 == 0 + def test_nonfull_enum_syntax2(): ffi = FFI() ffi.cdef("enum ee { EE1, EE2=\t..., EE3 };") @@ -1160,6 +1217,36 @@ ffi.cdef("union foo_u { char x; long *z; };") ffi.verify("union foo_u { char x; int y; long *z; };") +def test_ffi_union_partial(): + ffi = FFI() + ffi.cdef("union foo_u { char x; ...; };") + ffi.verify("union foo_u { char x; int y; };") + assert ffi.sizeof("union foo_u") == 4 + +def test_ffi_union_with_partial_struct(): + ffi = FFI() + ffi.cdef("struct foo_s { int x; ...; }; union foo_u { struct foo_s s; };") + ffi.verify("struct foo_s { int a; int x; }; " + "union foo_u { char b[32]; struct foo_s s; };") + assert ffi.sizeof("struct foo_s") == 8 + assert ffi.sizeof("union foo_u") == 32 + +def test_ffi_union_partial_2(): + ffi = FFI() + ffi.cdef("typedef union { char x; ...; } u1;") + ffi.verify("typedef union { char x; int y; } u1;") + assert ffi.sizeof("u1") == 4 + +def test_ffi_union_with_partial_struct_2(): + ffi = FFI() + ffi.cdef("typedef struct { int x; ...; } s1;" + "typedef union { s1 s; } u1;") + ffi.verify("typedef struct { int a; int x; } s1; " + "typedef union { char b[32]; s1 s; } u1;") + assert ffi.sizeof("s1") == 8 + assert ffi.sizeof("u1") == 32 + assert ffi.offsetof("u1", "s") == 0 + def test_ffi_struct_packed(): if sys.platform == 'win32': py.test.skip("needs a GCC extension") @@ -1423,7 +1510,12 @@ ffi = FFI() ffi.cdef("int fooarray[...];") lib = ffi.verify("int fooarray[50];") - assert repr(lib.fooarray).startswith("" + +def test_bug_const_char_ptr_array_1(): + ffi = FFI() + ffi.cdef("""const char *a[...];""") 
+ lib = ffi.verify("""const char *a[5];""") + assert repr(ffi.typeof(lib.a)) == "" + +def test_bug_const_char_ptr_array_2(): + from cffi import FFI # ignore warnings + ffi = FFI() + ffi.cdef("""const int a[];""") + lib = ffi.verify("""const int a[5];""") + assert repr(ffi.typeof(lib.a)) == "" diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py @@ -2,7 +2,7 @@ import sys, os, imp, math, shutil import py from cffi import FFI, FFIError -from cffi.verifier import Verifier, _locate_engine_class, _get_so_suffix +from cffi.verifier import Verifier, _locate_engine_class, _get_so_suffixes from cffi.ffiplatform import maybe_relative_path from pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -250,7 +250,7 @@ lib = ffi.verify(csrc, force_generic_engine=self.generic, modulename=modname) assert lib.test1foo(143) == 80.0 - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] fn1 = os.path.join(ffi.verifier.tmpdir, modname + '.c') fn2 = os.path.join(ffi.verifier.tmpdir, modname + suffix) assert ffi.verifier.sourcefilename == fn1 From noreply at buildbot.pypy.org Sat Nov 9 23:09:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 23:09:22 +0100 (CET) Subject: [pypy-commit] pypy default: Add missing file Message-ID: <20131109220922.4B9531C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67913:0d30ad15b538 Date: 2013-11-09 23:08 +0100 http://bitbucket.org/pypy/pypy/changeset/0d30ad15b538/ Log: Add missing file diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import 
allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) From noreply at buildbot.pypy.org Sat Nov 9 23:18:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 23:18:04 +0100 (CET) Subject: [pypy-commit] pypy default: Update version number Message-ID: <20131109221804.04DA11C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67914:e51f3f0f19cc Date: 2013-11-09 23:17 +0100 http://bitbucket.org/pypy/pypy/changeset/e51f3f0f19cc/ Log: Update version number diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski From noreply at buildbot.pypy.org Sat Nov 9 23:29:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 23:29:42 +0100 (CET) Subject: [pypy-commit] cffi default: Test and fix: a remaining deadlock Message-ID: <20131109222942.2C00D1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1405:ec61ea587ec6 Date: 2013-11-09 23:29 +0100 http://bitbucket.org/cffi/cffi/changeset/ec61ea587ec6/ Log: Test and fix: a remaining deadlock diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -393,7 +393,8 @@ negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -707,11 +707,14 @@ def test_define_int(): ffi = FFI() ffi.cdef("#define FOO ...\n" - "\t#\tdefine\tBAR\t...\t") + "\t#\tdefine\tBAR\t...\t\n" + "#define BAZ ...\n") lib = ffi.verify("#define FOO 42\n" - "#define BAR (-44)\n") + "#define BAR (-44)\n" + "#define BAZ 0xffffffffffffffffLL\n") assert lib.FOO == 42 assert lib.BAR == -44 + assert lib.BAZ == 0xffffffffffffffff def test_access_variable(): ffi = FFI() From noreply at buildbot.pypy.org Sat Nov 9 23:30:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Nov 2013 23:30:47 +0100 (CET) Subject: [pypy-commit] pypy default: Import cffi/ec61ea587ec6 Message-ID: <20131109223047.72EBB1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67915:ca509b853772 Date: 2013-11-09 23:30 +0100 http://bitbucket.org/pypy/pypy/changeset/ca509b853772/ Log: Import 
cffi/ec61ea587ec6 diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -393,7 +393,8 @@ negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -708,11 +708,14 @@ def test_define_int(): ffi = FFI() ffi.cdef("#define FOO ...\n" - "\t#\tdefine\tBAR\t...\t") + "\t#\tdefine\tBAR\t...\t\n" + "#define BAZ ...\n") lib = ffi.verify("#define FOO 42\n" - "#define BAR (-44)\n") + "#define BAR (-44)\n" + "#define BAZ 0xffffffffffffffffLL\n") assert lib.FOO == 42 assert lib.BAR == -44 + assert lib.BAZ == 0xffffffffffffffff def test_access_variable(): ffi = FFI() From noreply at buildbot.pypy.org Sun Nov 10 00:05:48 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 10 Nov 2013 00:05:48 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.5: merge default into branch Message-ID: <20131109230548.7B7CA1C1051@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: stdlib-2.7.5 Changeset: r67916:61633f279ebe Date: 2013-11-09 22:51 +0200 http://bitbucket.org/pypy/pypy/changeset/61633f279ebe/ Log: merge default into branch diff too long, truncating to 2000 out of 42148 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = 
$(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,9 +26,11 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest to use virtualenv with the resulting pypy-c as the interpreter, you can diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -1780,7 +1780,19 @@ # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" - if argument_values is not action.default: + + # XXX PyPy bug-to-bug compatibility: "is" on primitive types + # is not consistent in CPython. We'll assume it is close + # enough for ints (which is true only for "small ints"), but + # for floats and longs and complexes we'll go for the option + # of forcing "is" to say False, like it usually does on + # CPython. A fix is pending on CPython trunk + # (http://bugs.python.org/issue18943) but that might change + # the details of the semantics and so not be applied to 2.7. + # See the line AA below. + + if (argument_values is not action.default or + type(argument_values) in (float, long, complex)): # AA seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -44,6 +44,8 @@ UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') """ +import struct + __author__ = 'Ka-Ping Yee ' RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ @@ -125,25 +127,39 @@ overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. """ - if [hex, bytes, bytes_le, fields, int].count(None) != 4: - raise TypeError('need one of hex, bytes, bytes_le, fields, or int') if hex is not None: + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: raise ValueError('badly formed hexadecimal UUID string') int = long(hex, 16) - if bytes_le is not None: + elif bytes_le is not None: + if bytes is not None or fields is not None or int is not None: + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] + bytes_le[8:]) - if bytes is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + elif bytes is not None: + if fields is not None or int is not None: + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') - int = long(('%02x'*16) % tuple(map(ord, bytes)), 16) - if fields is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + 
elif fields is not None: + if int is not None: + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, @@ -163,9 +179,12 @@ clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low int = ((time_low << 96L) | (time_mid << 80L) | (time_hi_version << 64L) | (clock_seq << 48L) | node) - if int is not None: + elif int is not None: if not 0 <= int < 1<<128L: raise ValueError('int is out of range (need a 128-bit value)') + else: + raise TypeError('one of hex, bytes, bytes_le, fields,' + ' or int need to be not None') if version is not None: if not 1 <= version <= 5: raise ValueError('illegal version number') @@ -175,7 +194,7 @@ # Set the version number. int &= ~(0xf000 << 64L) int |= version << 76L - self.__dict__['int'] = int + object.__setattr__(self, 'int', int) def __cmp__(self, other): if isinstance(other, UUID): diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1,6 +1,9 @@ """Reimplementation of the standard extension module '_curses' using cffi.""" import sys +if sys.platform == 'win32': + #This module does not exist in windows + raise ImportError('No module named _curses') from functools import wraps from cffi import FFI diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -363,9 +363,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): @@ -1238,7 +1240,10 @@ if cvt is not None: param = cvt(param) - param = adapt(param) 
+ try: + param = adapt(param) + except: + pass # And use previous value if param is None: rc = _lib.sqlite3_bind_null(self._statement, idx) diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -22,6 +22,7 @@ READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE EXCEPTION = tklib.TCL_EXCEPTION +DONT_WAIT = tklib.TCL_DONT_WAIT def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -4,7 +4,23 @@ from . import TclError from .tclobj import TclObject, FromObj, AsObj, TypeCache +import contextlib import sys +import threading +import time + + +class _DummyLock(object): + "A lock-like object that does not do anything" + def acquire(self): + pass + def release(self): + pass + def __enter__(self): + pass + def __exit__(self, *exc): + pass + def varname_converter(input): if isinstance(input, TclObject): @@ -37,17 +53,18 @@ def PythonCmd(clientData, interp, argc, argv): self = tkffi.from_handle(clientData) assert self.app.interp == interp - try: - args = [tkffi.string(arg) for arg in argv[1:argc]] - result = self.func(*args) - obj = AsObj(result) - tklib.Tcl_SetObjResult(interp, obj) - except: - self.app.errorInCmd = True - self.app.exc_info = sys.exc_info() - return tklib.TCL_ERROR - else: - return tklib.TCL_OK + with self.app._tcl_lock_released(): + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK @tkffi.callback("Tcl_CmdDeleteProc") def PythonCmdDelete(clientData): @@ -58,6 +75,8 @@ class TkApp(object): + _busywaitinterval = 0.02 # 20ms. 
+ def __new__(cls, screenName, baseName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: @@ -73,6 +92,12 @@ self.quitMainLoop = False self.errorInCmd = False + if not self.threaded: + # TCL is not thread-safe, calls needs to be serialized. + self._tcl_lock = threading.Lock() + else: + self._tcl_lock = _DummyLock() + self._typeCache = TypeCache() self._commands = {} @@ -133,6 +158,13 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise RuntimeError("Calling Tcl from different appartment") + @contextlib.contextmanager + def _tcl_lock_released(self): + "Context manager to temporarily release the tcl lock." + self._tcl_lock.release() + yield + self._tcl_lock.acquire() + def loadtk(self): # We want to guard against calling Tk_Init() multiple times err = tklib.Tcl_Eval(self.interp, "info exists tk_version") @@ -159,22 +191,25 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) - if not res: - self.raiseTclError() - assert self._wantobjects - return FromObj(self, res) + with self._tcl_lock: + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) def _setvar(self, name1, value, global_only=False): name1 = varname_converter(name1) + # XXX Acquire tcl lock??? 
newval = AsObj(value) flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, - newval, flags) - if not res: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() def _unsetvar(self, name1, name2=None, global_only=False): name1 = varname_converter(name1) @@ -183,9 +218,10 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() def getvar(self, name1, name2=None): return self._var_invoke(self._getvar, name1, name2) @@ -219,9 +255,10 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, - clientData, _CommandData.PythonCmdDelete) + with self._tcl_lock: + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +266,8 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + with self._tcl_lock: + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) if res == -1: raise TclError("can't delete Tcl command") @@ -256,11 +294,12 @@ tklib.Tcl_IncrRefCount(obj) objects[i] = obj - res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() - else: - result = self._callResult() + with self._tcl_lock: + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == 
tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() finally: for obj in objects: if obj: @@ -280,17 +319,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): if isinstance(arg, tuple): @@ -375,7 +416,10 @@ if self.threaded: result = tklib.Tcl_DoOneEvent(0) else: - raise NotImplementedError("TCL configured without threads") + with self._tcl_lock: + result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT) + if result == 0: + time.sleep(self._busywaitinterval) if result < 0: break diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return 
result + pass # fall through and return tcl object. elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -1,6 +1,7 @@ # C bindings with libtcl and libtk. from cffi import FFI +import sys tkffi = FFI() @@ -18,6 +19,8 @@ #define TCL_EVAL_DIRECT ... #define TCL_EVAL_GLOBAL ... +#define TCL_DONT_WAIT ... + typedef unsigned short Tcl_UniChar; typedef ... Tcl_Interp; typedef ...* Tcl_ThreadId; @@ -69,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -102,6 +106,17 @@ int Tk_GetNumMainWindows(); """) +# XXX find a better way to detect paths +# XXX pick up CPPFLAGS and LDFLAGS and add to these paths? 
+if sys.platform.startswith("openbsd"): + incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] + linklibs = ['tk85', 'tcl85'] + libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +else: + incdirs=['/usr/include/tcl'] + linklibs=['tcl', 'tk'] + libdirs = [] + tklib = tkffi.verify(""" #include #include @@ -109,6 +124,7 @@ char *get_tk_version() { return TK_VERSION; } char *get_tcl_version() { return TCL_VERSION; } """, -include_dirs=['/usr/include/tcl'], -libraries=['tcl', 'tk'], +include_dirs=incdirs, +libraries=linklibs, +library_dirs = libdirs ) diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7" -__version_info__ = (0, 7) +__version__ = "0.7.2" +__version_info__ = (0, 7, 2) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -54,7 +54,8 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert backend.__version__ == __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ b/lib_pypy/cffi/commontypes.py @@ -30,7 +30,9 @@ elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) else: - assert commontype != result + if commontype == result: + raise api.FFIError("Unsupported type: %r. Please file a bug " + "if you think it should be." 
% (commontype,)) result = resolve_common_type(result) # recursively assert isinstance(result, model.BaseTypeByIdentity) _CACHE[commontype] = result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -290,13 +290,26 @@ # assume a primitive type. get it from .names, but reduce # synonyms to a single chosen combination names = list(type.names) - if names == ['signed'] or names == ['unsigned']: - names.append('int') - if names[0] == 'signed' and names != ['signed', 'char']: - names.pop(0) - if (len(names) > 1 and names[-1] == 'int' - and names != ['unsigned', 'int']): - names.pop() + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names ident = ' '.join(names) if ident == 'void': return model.void_type @@ -500,8 +513,8 @@ self._partial_length = True return None # - raise api.FFIError("unsupported non-constant or " - "not immediately constant expression") + raise api.FFIError("unsupported expression: expected a " + "simple numeric constant") def _build_enum_type(self, explicit_name, decls): if decls is not None: diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -61,7 +61,9 @@ def load_library(self): # import it with the CFFI backend backend = self.ffi._backend - module = backend.load_library(self.verifier.modulefilename) + # needs to make a 
path that contains '/', on Posix + filename = os.path.join(os.curdir, self.verifier.modulefilename) + module = backend.load_library(filename) # # call loading_gen_struct() to get the struct layout inferred by # the C compiler diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -40,9 +40,9 @@ # for all computations. See the book for algorithms for converting between # proleptic Gregorian ordinals and many other calendar systems. -_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] +_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] -_DAYS_BEFORE_MONTH = [None] +_DAYS_BEFORE_MONTH = [-1] dbm = 0 for dim in _DAYS_IN_MONTH[1:]: _DAYS_BEFORE_MONTH.append(dbm) diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py deleted file mode 100644 --- a/lib_pypy/numpy.py +++ /dev/null @@ -1,5 +0,0 @@ -raise ImportError( - "The 'numpy' module of PyPy is in-development and not complete. " - "To try it out anyway, you can either import from 'numpypy', " - "or just write 'import numpypy' first in your program and then " - "import from 'numpy' as usual.") diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -import core -from core import * -import lib -from lib import * - -from __builtin__ import bool, int, long, float, complex, object, unicode, str -from core import abs, max, min - -__all__ = [] -__all__ += core.__all__ -__all__ += lib.__all__ - -import sys -sys.modules.setdefault('numpy', sys.modules['numpypy']) diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -import numeric -from numeric import * -import fromnumeric -from fromnumeric import * -import shape_base -from shape_base import * - -from fromnumeric import amax as max, amin as min -from 
numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -import multiarray as mu -import umath as um -from numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(xrange(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: - ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - rcount = _count_reduce_items(arr, axis) - 
if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - arr = asanyarray(a) - - # First compute the mean, saving 'rcount' for reuse later - if dtype is None and arr.dtype.kind in ['b','u','i']: - arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True) - else: - arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True) - rcount = _count_reduce_items(arr, axis) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide(arrmean, rcount, - out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean / float(rcount) - - # arr - arrmean - x = arr - arrmean - - # (arr - arrmean) ** 2 - if arr.dtype.kind == 'c': - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - - # add.reduce((arr - arrmean) ** 2, axis) - ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof) - if not keepdims and isinstance(rcount, mu.ndarray): - rcount = rcount.squeeze(axis=axis) - rcount -= ddof - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - else: - ret = um.sqrt(ret) - - return ret diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/arrayprint.py +++ /dev/null @@ -1,750 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", "get_printoptions"] 
-__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -import numerictypes as _nt -from umath import maximum, minimum, absolute, not_equal, isnan, isinf -#from multiarray import format_longfloat, datetime_as_string, datetime_data -from fromnumeric import ravel - - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements - -if sys.version_info[0] >= 3: - from functools import reduce - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). 
- infstr : str, optional - String representation of floating point infinity (default inf). - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - See Also - -------- - get_printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) 
- >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... suppress=False, threshold=1000, formatter=None) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ - _formatter - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter - -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - For a full description of these options, see `set_printoptions`. 
- - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' - else: - return 'False' - - -def repr_format(x): - return repr(x) - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - formatdict = {'bool' : _boolFormatter, - 'int' : IntegerFormat(data), - 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : LongFloatFormat(precision), - 'complexfloat' : ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat' : LongComplexFormat(precision), - 'datetime' : DatetimeFormat(data), - 'timedelta' : TimedeltaFormat(data), - 'numpystr' : repr_format, - 'str' : str} - - if formatter is not None: - fkeys = [k for k in formatter.keys() if formatter[k] is not None] - if 'all' in 
fkeys: - for key in formatdict.keys(): - formatdict[key] = formatter['all'] - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = formatter['int_kind'] - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = formatter['float_kind'] - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = formatter['complex_kind'] - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = formatter['str_kind'] - for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = formatter[key] - - try: - format_function = a._format - msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \ - "will be removed in 2.1. Use the `formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - # find the right formatting function for the array - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - format_function = formatdict['bool'] - elif issubclass(dtypeobj, _nt.integer): - #if issubclass(dtypeobj, _nt.timedelta64): - # format_function = formatdict['timedelta'] - #else: - format_function = formatdict['int'] - elif issubclass(dtypeobj, _nt.floating): - if hasattr(_nt, 'longfloat') and issubclass(dtypeobj, _nt.longfloat): - format_function = formatdict['longfloat'] - else: - format_function = formatdict['float'] - elif issubclass(dtypeobj, _nt.complexfloating): - if hasattr(_nt, 'clongfloat') and issubclass(dtypeobj, _nt.clongfloat): - format_function = formatdict['longcomplexfloat'] - else: - format_function = formatdict['complexfloat'] - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - format_function = formatdict['numpystr'] - #elif issubclass(dtypeobj, _nt.datetime64): - # format_function = formatdict['datetime'] - else: - format_function = formatdict['str'] - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, 
len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - return lst - -def _convert_arrays(obj): - import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError : if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - msg = "The `_format` attribute is deprecated in Numpy " \ - "2.0 and will be removed in 2.1. Use the " \ - "`formatter` kw instead." 
- import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix, formatter=formatter) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in xrange(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in xrange(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in xrange(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, 
separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in xrange(trailing_items, 1, -1): - if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = precision - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - self.max_str_len = 0 - try: - self.fillFormat(data) - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - - def fillFormat(self, data): - import numeric as _nc - errstate = _nc.seterr(all='ignore') - try: - special = isnan(data) | isinf(data) - valid = not_equal(data, 0) & ~special - non_zero = absolute(data.compress(valid)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. 
- else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): - self.exp_format = True - finally: - _nc.seterr(**errstate) - - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' - else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) - else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) - else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' - else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) - - self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format - - def __call__(self, x, strip_zeros=True): - import numeric as _nc - err = _nc.seterr(invalid='ignore') - try: - if isnan(x): - if self.sign: - return self.special_fmt % ('+' + _nan_str,) - else: - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - if self.sign: - return self.special_fmt % ('+' + _inf_str,) - else: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + _inf_str,) - finally: - _nc.seterr(**err) - - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s - - -def _digits(x, precision, format): - s = format % x - z = 
s.rstrip('0') - return precision - len(s) + len(z) - - -_MAXINT = sys.maxint -_MININT = -sys.maxint-1 -class IntegerFormat(object): - def __init__(self, data): - try: - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - self.format = '%' + str(max_str_len) + 'd' - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - except ValueError: - # this occurs when everything is NA - pass - - def __call__(self, x): - if _MININT < x < _MAXINT: - return self.format % x - else: - return "%s" % x - -class LongFloatFormat(object): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def __init__(self, precision, sign=False): - self.precision = precision - self.sign = sign - - def __call__(self, x): - if isnan(x): - if self.sign: - return '+' + _nan_str - else: - return ' ' + _nan_str - elif isinf(x): - if x > 0: - if self.sign: - return '+' + _inf_str - else: - return ' ' + _inf_str - else: - return '-' + _inf_str - elif x >= 0: - if self.sign: - return '+' + format_longfloat(x, self.precision) - else: - return ' ' + format_longfloat(x, self.precision) - else: - return format_longfloat(x, self.precision) - - -class LongComplexFormat(object): - def __init__(self, precision): - self.real_format = LongFloatFormat(precision) - self.imag_format = LongFloatFormat(precision, sign=True) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - return r + i + 'j' - - -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) - - def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not self.imag_format.exp_format: - z 
= i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) - else: - i = i + 'j' - return r + i - -class DatetimeFormat(object): - def __init__(self, x, unit=None, - timezone=None, casting='same_kind'): - # Get the unit from the dtype - if unit is None: - if x.dtype.kind == 'M': - unit = datetime_data(x.dtype)[0] - else: - unit = 's' - - # If timezone is default, make it 'local' or 'UTC' based on the unit - if timezone is None: - # Date units -> UTC, time units -> local - if unit in ('Y', 'M', 'W', 'D'): - self.timezone = 'UTC' - else: - self.timezone = 'local' - else: - self.timezone = timezone - self.unit = unit - self.casting = casting - - def __call__(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) - -class TimedeltaFormat(object): - def __init__(self, data): - if data.dtype.kind == 'm': - v = data.view('i8') - max_str_len = max(len(str(maximum.reduce(v))), - len(str(minimum.reduce(v)))) - self.format = '%' + str(max_str_len) + 'd' - - def __call__(self, x): - return self.format % x.astype('i8') - diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/fromnumeric.py +++ /dev/null @@ -1,2431 +0,0 @@ -###################################################################### -# This is a copy of numpy/core/fromnumeric.py modified for numpypy -###################################################################### -# Each name in __all__ was a function in 'numeric' that is now -# a method in 'numpy'. -# When the corresponding method is added to numpypy BaseArray -# each function should be added as a module function -# at the applevel -# This can be as simple as doing the following -# -# def func(a, ...): -# if not hasattr(a, 'func') -# a = numpypy.array(a) -# return a.func(...) 
-# -###################################################################### - -import numpypy -import _numpypy - -# Module containing non-deprecated functions borrowed from Numeric. -__docformat__ = "restructuredtext en" - -# functions that are now methods -__all__ = ['take', 'reshape', 'choose', 'repeat', 'put', - 'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin', - 'searchsorted', 'alen', - 'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape', - 'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue', - 'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim', - 'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze', - 'amax', 'amin', - ] - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. - - Parameters - ---------- - a : array_like - The source array. - indices : array_like - The indices of the values to extract. - axis : int, optional - The axis over which to select values. By default, the flattened - input array is used. - out : ndarray, optional - If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - Returns - ------- - subarray : ndarray - The returned array has the same type as `a`. 
- - See Also - -------- - ndarray.take : equivalent method - - Examples - -------- - >>> a = [4, 3, 5, 7, 6, 8] - >>> indices = [0, 1, 4] - >>> np.take(a, indices) - array([4, 3, 6]) - - In this example if `a` is an ndarray, "fancy" indexing can be used. - - >>> a = np.array(a) - >>> a[indices] - array([4, 3, 6]) - - """ - raise NotImplementedError('Waiting on interp level method') - - -# not deprecated --- copy if necessary, view otherwise -def reshape(a, newshape, order='C'): - """ - Gives a new shape to an array without changing its data. - - Parameters - ---------- - a : array_like - Array to be reshaped. - newshape : int or tuple of ints - The new shape should be compatible with the original shape. If - an integer, then the result will be a 1-D array of that length. - One shape dimension can be -1. In this case, the value is inferred - from the length of the array and remaining dimensions. - order : {'C', 'F', 'A'}, optional - Determines whether the array data should be viewed as in C - (row-major) order, FORTRAN (column-major) order, or the C/FORTRAN - order should be preserved. - - Returns - ------- - reshaped_array : ndarray - This will be a new view object if possible; otherwise, it will - be a copy. - - - See Also - -------- - ndarray.reshape : Equivalent method. - - Notes - ----- - - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raise if the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - # A transpose make the array non-contiguous - >>> b = a.T - # Taking a view makes it possible to modify the shape without modiying the - # initial object. 
- >>> c = b.view() - >>> c.shape = (20) - AttributeError: incompatible shape for a non-contiguous array - - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - - """ - assert order == 'C' - if not hasattr(a, 'reshape'): - a = numpypy.array(a) - return a.reshape(newshape) - - -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties. Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. 
Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. 
- - See Also - -------- - ndarray.choose : equivalent method - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... ) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - return _numpypy.choose(a, choices, out, mode) - - -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. - repeats : {int, array of ints} - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. 
- axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. - - Examples - -------- - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - return _numpypy.repeat(a, repeats, axis) - - -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - See Also - -------- - putmask, place - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - raise NotImplementedError('Waiting on interp level method') - - -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. 
- axis2 : int - Second axis. - - Returns - ------- - a_swapped : ndarray - If `a` is an ndarray, then a view of `a` is returned; otherwise - a new array is created. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) From noreply at buildbot.pypy.org Sun Nov 10 00:05:50 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 10 Nov 2013 00:05:50 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.5: pypy has no _sysconfigdata Message-ID: <20131109230550.0BFF31C1051@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: stdlib-2.7.5 Changeset: r67917:d2bfba55df7a Date: 2013-11-10 00:19 +0200 http://bitbucket.org/pypy/pypy/changeset/d2bfba55df7a/ Log: pypy has no _sysconfigdata diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -360,9 +360,11 @@ def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" - # _sysconfigdata is generated at build time, see _generate_posix_vars() - from _sysconfigdata import build_time_vars - vars.update(build_time_vars) + # in cPython, _sysconfigdata is generated at build time, see _generate_posix_vars() + # in PyPy no such module exists + #from _sysconfigdata import build_time_vars + #vars.update(build_time_vars) + return def _init_non_posix(vars): """Initialize the module as appropriate for NT""" From noreply at buildbot.pypy.org Sun Nov 10 00:05:51 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 10 Nov 2013 00:05:51 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.5: merge default into branch Message-ID: <20131109230551.B81191C1051@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: stdlib-2.7.5 Changeset: r67918:7dd1946e18f5 Date: 2013-11-10 01:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7dd1946e18f5/ Log: merge default into branch diff too long, truncating to 2000 out of 3273 lines diff --git a/lib_pypy/cffi.egg-info 
b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 
'name'. @@ -109,31 +113,49 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + if hasattr(type, 'as_function_pointer'): + really_a_function_type = True + type = type.as_function_pointer() + else: + really_a_function_type = False + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. 
It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -144,6 +166,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +306,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -322,7 +351,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +372,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +404,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +436,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +482,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -192,10 +192,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +212,10 @@ self.item = item self.length = length # - if self.length is None: + if length is None or length == '...': brackets = '&[]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( 
self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +223,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +252,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +304,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +329,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - 
bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +352,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . import ffiplatform @@ -361,19 +360,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +385,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . 
import ffiplatform diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -280,8 +280,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +464,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +494,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # 
use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +531,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +570,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +601,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -677,15 +690,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +708,29 @@ _loading_cpy_variable = _loaded_noop 
def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. 
kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, 
model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = 
module.load_function(BFunc, funcname) value = function() return value @@ -431,10 +440,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +475,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: @@ -476,6 +494,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +519,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -47,3 +47,7 @@ * post announcement on morepypy.blogspot.com * send announcements to pypy-dev, python-list, python-announce, python-dev ... + +* add a tag on jitviewer that corresponds to pypy release +* add a tag on codespeed that corresponds to pypy release + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -91,6 +91,7 @@ .. branch: safe-win-mmap .. branch: boolean-indexing-cleanup .. branch: cpyext-best_base +.. branch: cpyext-int .. branch: fileops2 .. branch: nobold-backtrace diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -7,7 +7,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.7")', + '__version__': 'space.wrap("0.8")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -19,9 +19,9 @@ _cdata = lltype.nullptr(rffi.CCHARP.TO) def __init__(self, space, cdata, ctype): - from pypy.module._cffi_backend import ctypeprim + from pypy.module._cffi_backend import ctypeobj assert lltype.typeOf(cdata) == rffi.CCHARP - assert isinstance(ctype, ctypeprim.W_CType) + assert isinstance(ctype, ctypeobj.W_CType) self.space = space self._cdata = cdata # don't forget keepalive_until_here! self.ctype = ctype @@ -211,7 +211,21 @@ keepalive_until_here(w_value) return # + # A fast path for [0:N] = "somestring". 
+ from pypy.module._cffi_backend import ctypeprim space = self.space + if (space.isinstance_w(w_value, space.w_str) and + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + value = space.str_w(w_value) + if len(value) != length: + raise operationerrfmt(space.w_ValueError, + "need a string of length %d, got %d", + length, len(value)) + copy_string_to_raw(llstr(value), cdata, 0, length) + return + # w_iter = space.iter(w_value) for i in range(length): try: @@ -245,19 +259,22 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray + from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr # if (ct is not self.ctype or not isinstance(ct, ctypeptr.W_CTypePointer) or - ct.ctitem.size <= 0): + (ct.ctitem.size <= 0 and not ct.is_void_ptr)): raise operationerrfmt(space.w_TypeError, "cannot subtract cdata '%s' and cdata '%s'", self.ctype.name, ct.name) # + itemsize = ct.ctitem.size + if itemsize <= 0: itemsize = 1 diff = (rffi.cast(lltype.Signed, self._cdata) - - rffi.cast(lltype.Signed, w_other._cdata)) // ct.ctitem.size + rffi.cast(lltype.Signed, w_other._cdata)) // itemsize return space.wrap(diff) # return self._add_or_sub(w_other, -1) @@ -441,6 +458,7 @@ __getitem__ = interp2app(W_CData.getitem), __setitem__ = interp2app(W_CData.setitem), __add__ = interp2app(W_CData.add), + __radd__ = interp2app(W_CData.add), __sub__ = interp2app(W_CData.sub), __getattr__ = interp2app(W_CData.getattr), __setattr__ = interp2app(W_CData.setattr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -34,19 +34,8 @@ datasize = self.size # if datasize < 0: - if (space.isinstance_w(w_init, space.w_list) or - 
space.isinstance_w(w_init, space.w_tuple)): - length = space.int_w(space.len(w_init)) - elif space.isinstance_w(w_init, space.w_basestring): - # from a string, we add the null terminator - length = space.int_w(space.len(w_init)) + 1 - else: - length = space.getindex_w(w_init, space.w_OverflowError) - if length < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative array length")) - w_init = space.w_None - # + from pypy.module._cffi_backend import misc + w_init, length = misc.get_new_array_length(space, w_init) try: datasize = ovfcheck(length * self.ctitem.size) except OverflowError: diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -2,27 +2,25 @@ Pointers. """ -from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror - from rpython.rlib import rposix from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.annlowlevel import llstr, llunicode from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw, copy_unicode_to_raw +from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror from pypy.module._cffi_backend import cdataobj, misc, ctypeprim, ctypevoid from pypy.module._cffi_backend.ctypeobj import W_CType class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] + _attrs_ = ['ctitem', 'can_cast_anything', 'length'] + _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length'] length = -1 def __init__(self, space, size, extra, extra_position, ctitem, could_cast_anything=True): - from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion name, name_position = ctitem.insert_name(extra, extra_position) 
W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -31,7 +29,6 @@ # - for functions, it is the return type self.ctitem = ctitem self.can_cast_anything = could_cast_anything and ctitem.cast_anything - self.is_struct_ptr = isinstance(ctitem, W_CTypeStructOrUnion) def is_char_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar) @@ -90,8 +87,7 @@ "initializer string is too long for '%s'" " (got %d characters)", self.name, n) - for i in range(n): - cdata[i] = s[i] + copy_string_to_raw(llstr(s), cdata, 0, n) if n != self.length: cdata[n] = '\x00' elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar): @@ -105,8 +101,7 @@ " (got %d characters)", self.name, n) unichardata = rffi.cast(rffi.CWCHARP, cdata) - for i in range(n): - unichardata[i] = s[i] + copy_unicode_to_raw(llunicode(s), unichardata, 0, n) if n != self.length: unichardata[n] = u'\x00' else: @@ -157,7 +152,6 @@ return cdataobj.W_CData(self.space, ptrdata, self) def convert_from_object(self, cdata, w_ob): - space = self.space if not isinstance(w_ob, cdataobj.W_CData): raise self._convert_error("cdata pointer", w_ob) other = w_ob.ctype @@ -197,6 +191,7 @@ W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init): + from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctitem = self.ctitem datasize = ctitem.size @@ -204,10 +199,15 @@ raise operationerrfmt(space.w_TypeError, "cannot instantiate ctype '%s' of unknown size", self.name) - if self.is_struct_ptr: + if isinstance(ctitem, W_CTypeStructOrUnion): # 'newp' on a struct-or-union pointer: in this case, we return # a W_CDataPtrToStruct object which has a strong reference # to a W_CDataNewOwning that really contains the structure. 
+ # + if ctitem.with_var_array and not space.is_w(w_init, space.w_None): + datasize = ctitem.convert_struct_from_object( + lltype.nullptr(rffi.CCHARP.TO), w_init, datasize) + # cdatastruct = cdataobj.W_CDataNewOwning(space, datasize, ctitem) cdata = cdataobj.W_CDataPtrToStructOrUnion(space, cdatastruct._cdata, @@ -238,11 +238,15 @@ def add(self, cdata, i): space = self.space ctitem = self.ctitem + itemsize = ctitem.size if ctitem.size < 0: - raise operationerrfmt(space.w_TypeError, + if self.is_void_ptr: + itemsize = 1 + else: + raise operationerrfmt(space.w_TypeError, "ctype '%s' points to items of unknown size", self.name) - p = rffi.ptradd(cdata, i * self.ctitem.size) + p = rffi.ptradd(cdata, i * itemsize) return cdataobj.W_CData(space, p, self) def cast(self, w_ob): @@ -298,7 +302,6 @@ def convert_argument_from_object(self, cdata, w_ob): from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag - space = self.space result = (not isinstance(w_ob, cdataobj.W_CData) and self._prepare_pointer_call_argument(w_ob, cdata)) if result == 0: @@ -320,7 +323,8 @@ space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)): + (isinstance(ctype2, W_CTypePtrOrArray) and + isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -9,7 +9,8 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, intmask -from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._cffi_backend import cdataobj, 
ctypeprim, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -17,12 +18,13 @@ class W_CTypeStructOrUnion(W_CType): _immutable_fields_ = ['alignment?', 'fields_list?', 'fields_dict?', - 'custom_field_pos?'] + 'custom_field_pos?', 'with_var_array?'] # fields added by complete_struct_or_union(): alignment = -1 fields_list = None fields_dict = None custom_field_pos = False + with_var_array = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) @@ -90,12 +92,13 @@ pass def convert_from_object(self, cdata, w_ob): - space = self.space - if self._copy_from_same(cdata, w_ob): - return + if not self._copy_from_same(cdata, w_ob): + self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) + def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) + space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) @@ -104,7 +107,9 @@ "too many initializers for '%s' (got %d)", self.name, len(lst_w)) for i in range(len(lst_w)): - self.fields_list[i].write(cdata, lst_w[i]) + optvarsize = self.fields_list[i].write_v(cdata, lst_w[i], + optvarsize) + return optvarsize elif space.isinstance_w(w_ob, space.w_dict): lst_w = space.fixedview(w_ob) @@ -116,11 +121,16 @@ except KeyError: space.raise_key_error(w_key) assert 0 - cf.write(cdata, space.getitem(w_ob, w_key)) + optvarsize = cf.write_v(cdata, space.getitem(w_ob, w_key), + optvarsize) + return optvarsize else: - raise self._convert_error("list or tuple or dict or struct-cdata", - w_ob) + if optvarsize == -1: + msg = "list or tuple or dict or struct-cdata" + else: + msg = "list or tuple or dict" + raise self._convert_error(msg, w_ob) @jit.elidable def _getcfield_const(self, attr): @@ -192,6 +202,37 @@ else: self.ctype.convert_from_object(cdata, w_ob) + def write_v(self, cdata, w_ob, optvarsize): + # a special case for var-sized C99 arrays + from 
pypy.module._cffi_backend import ctypearray + ct = self.ctype + if isinstance(ct, ctypearray.W_CTypeArray) and ct.length < 0: + space = ct.space + w_ob, varsizelength = misc.get_new_array_length(space, w_ob) + if optvarsize != -1: + # in this mode, the only purpose of this function is to compute + # the real size of the structure from a var-sized C99 array + assert cdata == lltype.nullptr(rffi.CCHARP.TO) + itemsize = ct.ctitem.size + try: + varsize = ovfcheck(itemsize * varsizelength) + size = ovfcheck(self.offset + varsize) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array size would overflow a ssize_t")) + assert size >= 0 + return max(size, optvarsize) + # if 'value' was only an integer, get_new_array_length() returns + # w_ob = space.w_None. Detect if this was the case, + # and if so, stop here, leaving the content uninitialized + # (it should be zero-initialized from somewhere else). + if space.is_w(w_ob, space.w_None): + return optvarsize + # + if optvarsize == -1: + self.write(cdata, w_ob) + return optvarsize + def convert_bitfield_to_object(self, cdata): ctype = self.ctype space = ctype.space diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -278,6 +278,22 @@ # ____________________________________________________________ +def get_new_array_length(space, w_value): + if (space.isinstance_w(w_value, space.w_list) or + space.isinstance_w(w_value, space.w_tuple)): + return (w_value, space.int_w(space.len(w_value))) + elif space.isinstance_w(w_value, space.w_basestring): + # from a string, we add the null terminator + return (w_value, space.int_w(space.len(w_value)) + 1) + else: + explicitlength = space.getindex_w(w_value, space.w_OverflowError) + if explicitlength < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative array length")) + return (space.w_None, explicitlength) + +# 
____________________________________________________________ + @specialize.arg(0) def _raw_memcopy_tp(TPP, source, dest): # in its own function: LONGLONG may make the whole function jit-opaque diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -158,8 +158,10 @@ fields_list = [] fields_dict = {} custom_field_pos = False + with_var_array = False - for w_field in fields_w: + for i in range(len(fields_w)): + w_field = fields_w[i] field_w = space.fixedview(w_field) if not (2 <= len(field_w) <= 4): raise OperationError(space.w_TypeError, @@ -176,7 +178,11 @@ "duplicate field name '%s'", fname) # if ftype.size < 0: - raise operationerrfmt(space.w_TypeError, + if (isinstance(ftype, ctypearray.W_CTypeArray) and fbitsize < 0 + and (i == len(fields_w) - 1 or foffset != -1)): + with_var_array = True + else: + raise operationerrfmt(space.w_TypeError, "field '%s.%s' has ctype '%s' of unknown size", w_ctype.name, fname, ftype.name) # @@ -235,7 +241,8 @@ fields_list.append(fld) fields_dict[fname] = fld - boffset += ftype.size * 8 + if ftype.size >= 0: + boffset += ftype.size * 8 prev_bitfield_size = 0 else: @@ -359,6 +366,7 @@ w_ctype.fields_list = fields_list w_ctype.fields_dict = fields_dict w_ctype.custom_field_pos = custom_field_pos + w_ctype.with_var_array = with_var_array # ____________________________________________________________ diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -542,6 +542,7 @@ assert repr(a) == "" % ( 3*5*size_of_int(),) assert repr(a + 0).startswith(" x[i]=y" if space.isinstance_w(w_idx, space.w_slice): @@ -869,9 +868,6 @@ self.buffer[i] = w_item.buffer[j] j += 1 - # We can't look into this function until ptradd works with things (in the - # JIT) 
other than rffi.CCHARP - @jit.dont_look_inside def delitem(self, space, i, j): if i < 0: i += self.len @@ -907,16 +903,23 @@ lltype.free(oldbuffer, flavor='raw') # Add and mul methods - def descr_add(self, space, w_other): if not isinstance(w_other, W_Array): return space.w_NotImplemented a = mytype.w_class(space) a.setlen(self.len + w_other.len, overallocate=False) - for i in range(self.len): - a.buffer[i] = self.buffer[i] - for i in range(w_other.len): - a.buffer[i + self.len] = w_other.buffer[i] + if self.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, a.buffer), + rffi.cast(rffi.VOIDP, self.buffer), + self.len * mytype.bytes + ) + if w_other.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(a.buffer, self.len)), + rffi.cast(rffi.VOIDP, w_other.buffer), + w_other.len * mytype.bytes + ) return a def descr_inplace_add(self, space, w_other): @@ -925,8 +928,12 @@ oldlen = self.len otherlen = w_other.len self.setlen(oldlen + otherlen) - for i in range(otherlen): - self.buffer[oldlen + i] = w_other.buffer[i] + if otherlen: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(self.buffer, oldlen)), + rffi.cast(rffi.VOIDP, w_other.buffer), + otherlen * mytype.bytes + ) return self def descr_mul(self, space, w_repeat): diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -21,8 +21,17 @@ "Type description of PyIntObject" make_typedescr(space.w_int.instancetypedef, basestruct=PyIntObject.TO, + attach=int_attach, realize=int_realize) +def int_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyIntObject with the given int object. The + value must not be modified. 
+ """ + py_int = rffi.cast(PyIntObject, py_obj) + py_int.c_ob_ival = space.int_w(w_obj) + def int_realize(space, obj): intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival) w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -97,7 +97,7 @@ return (PyObject *)enumObj; """), - ], + ], prologue=""" typedef struct { @@ -166,3 +166,24 @@ assert isinstance(a, int) assert a == int(a) == 42 assert a.name == "ULTIMATE_ANSWER" + + def test_int_cast(self): + mod = self.import_extension('foo', [ + #prove it works for ints + ("test_int", "METH_NOARGS", + """ + PyObject * obj = PyInt_FromLong(42); + if (!PyInt_Check(obj)) { + Py_DECREF(obj); + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + PyObject * val = PyInt_FromLong(((PyIntObject *)obj)->ob_ival); + Py_DECREF(obj); + return val; + """ + ), + ]) + i = mod.test_int() + assert isinstance(i, int) + assert i == 42 diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -1,5 +1,3 @@ -import py - from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype @@ -286,3 +284,5 @@ arr = mod.test_FromObject() dt = mod.test_DescrFromType(11) assert dt.num == 11 + + diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -519,10 +519,13 @@ # by converting nonnative byte order. 
if self.is_scalar(): return space.wrap(0) - s = self.get_dtype().name - if not self.get_dtype().is_native(): - s = s[1:] - dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] + if not self.get_dtype().is_flexible_type(): + s = self.get_dtype().name + if not self.get_dtype().is_native(): + s = s[1:] + dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] + else: + dtype = self.get_dtype() contig = self.implementation.astype(space, dtype) return contig.argsort(space, w_axis) diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -12,8 +12,7 @@ exp = sorted(range(len(exp)), key=exp.__getitem__) c = a.copy() res = a.argsort() - assert (res == exp).all(), \ - 'a,res,dtype %r,%r,%r' % (a,res,dtype) + assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) assert (a == c).all() # not modified a = arange(100, dtype=dtype) @@ -60,11 +59,10 @@ for dtype in ['int', 'float', 'int16', 'float32', 'uint64', 'i2', complex]: a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype) - b = sorted(list(a)) - c = a.copy() - a.sort() - assert (a == b).all(), \ - 'a,orig,dtype %r,%r,%r' % (a,c,dtype) + exp = sorted(list(a)) + res = a.copy() + res.sort() + assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) a = arange(100, dtype=dtype) c = a.copy() @@ -85,7 +83,6 @@ #assert (a == b).all(), \ # 'a,orig,dtype %r,%r,%r' % (a,c,dtype) - # tests from numpy/tests/test_multiarray.py def test_sort_corner_cases(self): # test ordering for floats and complex containing nans. 
It is only @@ -307,7 +304,6 @@ assert (r == array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)).all() - # tests from numpy/tests/test_regression.py def test_sort_bigendian(self): skip('not implemented yet') @@ -325,3 +321,13 @@ y = fromstring("\x00\x01\x00\x02", dtype="S2") x.sort(kind='q') assert (x == y).all() + + def test_string_mergesort(self): + import numpypy as np + import sys + x = np.array(['a'] * 32) + if '__pypy__' in sys.builtin_module_names: + exc = raises(NotImplementedError, "x.argsort(kind='m')") + assert 'non-numeric types' in exc.value.message + else: + assert (x.argsort(kind='m') == np.arange(32)).all() diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -163,6 +163,8 @@ if step == 0: # index only return space.wrap(self.mmap.getitem(start)) elif step == 1: + if stop - start < 0: + return space.wrap("") return space.wrap(self.mmap.getslice(start, stop - start)) else: res = "".join([self.mmap.getitem(i) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -525,6 +525,8 @@ m = mmap(f.fileno(), 6) assert m[-3:7] == "bar" + assert m[1:0:1] == "" + f.close() def test_sequence_type(self): diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -131,6 +131,11 @@ if hasattr(os, 'fpathconf'): interpleveldefs['fpathconf'] = 'interp_posix.fpathconf' interpleveldefs['pathconf_names'] = 'space.wrap(os.pathconf_names)' + if hasattr(os, 'pathconf'): + interpleveldefs['pathconf'] = 'interp_posix.pathconf' + if hasattr(os, 'confstr'): + interpleveldefs['confstr'] = 'interp_posix.confstr' + interpleveldefs['confstr_names'] = 'space.wrap(os.confstr_names)' if hasattr(os, 'ttyname'): interpleveldefs['ttyname'] = 'interp_posix.ttyname' if 
hasattr(os, 'getloadavg'): @@ -155,7 +160,9 @@ for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs', + 'setgroups', 'initgroups', 'tcgetpgrp', 'tcsetpgrp', + 'getresuid', 'getresgid', 'setresuid', 'setresgid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -987,7 +987,39 @@ Return list of supplemental group IDs for the process. """ - return space.newlist([space.wrap(e) for e in os.getgroups()]) + try: + list = os.getgroups() + except OSError, e: + raise wrap_oserror(space, e) + return space.newlist([space.wrap(e) for e in list]) + +def setgroups(space, w_list): + """ setgroups(list) + + Set the groups of the current process to list. + """ + list = [] + for w_gid in space.unpackiterable(w_list): + gid = space.int_w(w_gid) + check_uid_range(space, gid) + list.append(gid) + try: + os.setgroups(list[:]) + except OSError, e: + raise wrap_oserror(space, e) + + at unwrap_spec(username=str, gid=c_gid_t) +def initgroups(space, username, gid): + """ initgroups(username, gid) -> None + + Call the system initgroups() to initialize the group access list with all of + the groups of which the specified username is a member, plus the specified + group id. 
+ """ + try: + os.initgroups(username, gid) + except OSError, e: + raise wrap_oserror(space, e) def getpgrp(space): """ getpgrp() -> pgrp @@ -1089,6 +1121,77 @@ raise wrap_oserror(space, e) return space.w_None + at unwrap_spec(fd=c_int) +def tcgetpgrp(space, fd): + """ tcgetpgrp(fd) -> pgid + + Return the process group associated with the terminal given by a fd. + """ + try: + pgid = os.tcgetpgrp(fd) + except OSError, e: + raise wrap_oserror(space, e) + return space.wrap(pgid) + + at unwrap_spec(fd=c_int, pgid=c_gid_t) +def tcsetpgrp(space, fd, pgid): + """ tcsetpgrp(fd, pgid) + + Set the process group associated with the terminal given by a fd. + """ + try: + os.tcsetpgrp(fd, pgid) + except OSError, e: + raise wrap_oserror(space, e) + +def getresuid(space): + """ getresuid() -> (ruid, euid, suid) + + Get tuple of the current process's real, effective, and saved user ids. + """ + try: + (ruid, euid, suid) = os.getresuid() + except OSError, e: + raise wrap_oserror(space, e) + return space.newtuple([space.wrap(ruid), + space.wrap(euid), + space.wrap(suid)]) + From noreply at buildbot.pypy.org Sun Nov 10 04:01:59 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 10 Nov 2013 04:01:59 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20131110030159.59E9A1C030D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67921:8085d3e79ef8 Date: 2013-11-09 19:01 -0800 http://bitbucket.org/pypy/pypy/changeset/8085d3e79ef8/ Log: merged upstream diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -393,7 +393,8 @@ negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -708,11 +708,14 @@ def test_define_int(): ffi = FFI() ffi.cdef("#define FOO ...\n" - "\t#\tdefine\tBAR\t...\t") + "\t#\tdefine\tBAR\t...\t\n" + "#define BAZ ...\n") lib = ffi.verify("#define FOO 42\n" - "#define BAR (-44)\n") + "#define BAR (-44)\n" + "#define BAZ 0xffffffffffffffffLL\n") assert lib.FOO == 42 assert lib.BAR == -44 + assert lib.BAZ == 0xffffffffffffffff def test_access_variable(): ffi = FFI() From noreply at buildbot.pypy.org Sun Nov 10 04:01:56 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 10 Nov 2013 04:01:56 +0100 (CET) Subject: [pypy-commit] pypy default: Actually do this Message-ID: <20131110030156.EBA1D1C0162@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67919:91d71c6427da Date: 2013-11-09 14:23 -0800 http://bitbucket.org/pypy/pypy/changeset/91d71c6427da/ Log: Actually do this diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -95,6 +95,9 @@ if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) + 
@jit.look_inside_iff( + lambda self, cdata, w_ob, optvarsize: jit.isvirtual(w_ob) + ) def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -327,4 +327,5 @@ log = self.run(main, [300]) loop, = log.loops_by_filename(self.filepath) - assert False, "XXX: fill this in" + assert loop.match(""" + """) From noreply at buildbot.pypy.org Sun Nov 10 04:01:58 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 10 Nov 2013 04:01:58 +0100 (CET) Subject: [pypy-commit] pypy default: a test for this Message-ID: <20131110030158.154F01C019D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r67920:ef63f35241ad Date: 2013-11-09 18:57 -0800 http://bitbucket.org/pypy/pypy/changeset/ef63f35241ad/ Log: a test for this diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -328,4 +328,45 @@ log = self.run(main, [300]) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + i161 = int_lt(i160, i43) + guard_true(i161, descr=...) + i162 = int_add(i160, 1) + setfield_gc(p22, i162, descr=) + guard_not_invalidated(descr=...) + p163 = force_token() + p164 = force_token() + p165 = getarrayitem_gc(p67, 0, descr=) + guard_value(p165, ConstPtr(ptr70), descr=...) + p166 = getfield_gc(p165, descr=) + guard_value(p166, ConstPtr(ptr72), descr=...) + p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) + guard_no_exception(descr=...) + i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 12, descr=) + setfield_gc(p167, 0, descr=) + setfield_gc(p167, ConstPtr(ptr86), descr=) + guard_no_exception(descr=...) 
+ i169 = int_add(i168, i97) + i170 = int_sub(i160, i106) + setfield_gc(p167, i168, descr=) + setfield_gc(p167, ConstPtr(ptr89), descr=) + i171 = uint_gt(i170, i108) + guard_false(i171, descr=...) + i172 = int_sub(i160, -2147483648) + i173 = int_and(i172, 4294967295) + i174 = int_add(i173, -2147483648) + setarrayitem_raw(i169, 0, i174, descr=) + i175 = int_add(i168, i121) + i176 = int_sub(i160, i130) + i177 = uint_gt(i176, i132) + guard_false(i177, descr=...) + setarrayitem_raw(i175, 0, i174, descr=) + i178 = int_add(i168, i140) + i179 = int_sub(i160, i149) + i180 = uint_gt(i179, i151) + guard_false(i180, descr=...) + setarrayitem_raw(i178, 0, i174, descr=) + --TICK-- + i183 = arraylen_gc(p67, descr=) + i184 = arraylen_gc(p92, descr=) + jump(..., descr=...) """) From noreply at buildbot.pypy.org Sun Nov 10 09:19:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 09:19:14 +0100 (CET) Subject: [pypy-commit] cffi default: Small tweaks Message-ID: <20131110081914.6A9041C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1406:1352b9a24203 Date: 2013-11-10 08:59 +0100 http://bitbucket.org/cffi/cffi/changeset/1352b9a24203/ Log: Small tweaks diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -129,11 +129,9 @@ cdecl = cdecl.encode('ascii') # type = self._parser.parse_type(cdecl) - if hasattr(type, 'as_function_pointer'): - really_a_function_type = True + really_a_function_type = type.is_raw_function + if really_a_function_type: type = type.as_function_pointer() - else: - really_a_function_type = False btype = self._get_cached_btype(type) result = btype, really_a_function_type self._parsed_types[key] = result diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -2,6 +2,7 @@ class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +147,7 @@ # a 
function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -212,8 +214,10 @@ self.item = item self.length = length # - if length is None or length == '...': + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: brackets = '&[%d]' % length self.c_name_with_marker = ( @@ -449,6 +453,7 @@ return NamedPointerType(tp, name) def global_cache(srctype, ffi, funcname, *args, **kwds): + # NB. multithread: careful code that should work without an explicit lock key = kwds.pop('key', (funcname, args)) assert not kwds try: @@ -468,8 +473,7 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + return ffi._backend.__typecache.setdefault(key, res) def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) From noreply at buildbot.pypy.org Sun Nov 10 09:19:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 09:19:15 +0100 (CET) Subject: [pypy-commit] cffi default: Bah, setdefault() is not atomic on WeakValueDictionary. Message-ID: <20131110081915.7B1B01C1041@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1407:d830b07122a6 Date: 2013-11-10 09:18 +0100 http://bitbucket.org/cffi/cffi/changeset/d830b07122a6/ Log: Bah, setdefault() is not atomic on WeakValueDictionary. diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -1,4 +1,6 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False @@ -452,8 +454,10 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): - # NB. 
multithread: careful code that should work without an explicit lock key = kwds.pop('key', (funcname, args)) assert not kwds try: @@ -473,7 +477,10 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - return ffi._backend.__typecache.setdefault(key, res) + # note that setdefault() on WeakValueDictionary is not atomic, + # which means that we have to use a lock too + with global_lock: + return ffi._backend.__typecache.setdefault(key, res) def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) From noreply at buildbot.pypy.org Sun Nov 10 10:03:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 10:03:11 +0100 (CET) Subject: [pypy-commit] cffi default: Due to a CPython bug, we cannot use setdefault() here Message-ID: <20131110090311.5BD091C147C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1408:1e317570f885 Date: 2013-11-10 10:03 +0100 http://bitbucket.org/cffi/cffi/changeset/1e317570f885/ Log: Due to a CPython bug, we cannot use setdefault() here diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -477,10 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - # note that setdefault() on WeakValueDictionary is not atomic, - # which means that we have to use a lock too + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache with global_lock: - return ffi._backend.__typecache.setdefault(key, res) + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) From noreply at buildbot.pypy.org Sun Nov 10 10:07:06 2013 From: 
noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 10:07:06 +0100 (CET) Subject: [pypy-commit] pypy default: WeakValueDictionary has a bug that is rare on CPython but Message-ID: <20131110090706.BAAEE1C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67922:1f02ae2291ae Date: 2013-11-10 09:54 +0100 http://bitbucket.org/pypy/pypy/changeset/1f02ae2291ae/ Log: WeakValueDictionary has a bug that is rare on CPython but more common on PyPy. Fix it here and report it as http://bugs.python.org/issue19542 diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -160,22 +160,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data From noreply at buildbot.pypy.org Sun Nov 10 10:07:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 10:07:08 +0100 (CET) Subject: [pypy-commit] pypy default: Import cffi/1e317570f885 Message-ID: <20131110090708.2989C1C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67923:b1b2e7994eda Date: 2013-11-10 09:05 +0000 http://bitbucket.org/pypy/pypy/changeset/b1b2e7994eda/ Log: Import cffi/1e317570f885 diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -129,11 +129,9 @@ cdecl = cdecl.encode('ascii') # type = self._parser.parse_type(cdecl) - if hasattr(type, 'as_function_pointer'): - really_a_function_type = True + really_a_function_type = type.is_raw_function + if 
really_a_function_type: type = type.as_function_pointer() - else: - really_a_function_type = False btype = self._get_cached_btype(type) result = btype, really_a_function_type self._parsed_types[key] = result diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -212,8 +216,10 @@ self.item = item self.length = length # - if length is None or length == '...': + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: brackets = '&[%d]' % length self.c_name_with_marker = ( @@ -448,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -468,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) From noreply at buildbot.pypy.org Sun Nov 10 10:07:09 
2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 10:07:09 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131110090709.AB6911C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67924:1acc538d3b48 Date: 2013-11-10 10:06 +0100 http://bitbucket.org/pypy/pypy/changeset/1acc538d3b48/ Log: merge heads diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -95,6 +95,9 @@ if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) + @jit.look_inside_iff( + lambda self, cdata, w_ob, optvarsize: jit.isvirtual(w_ob) + ) def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -327,4 +327,46 @@ log = self.run(main, [300]) loop, = log.loops_by_filename(self.filepath) - assert False, "XXX: fill this in" + assert loop.match(""" + i161 = int_lt(i160, i43) + guard_true(i161, descr=...) + i162 = int_add(i160, 1) + setfield_gc(p22, i162, descr=) + guard_not_invalidated(descr=...) + p163 = force_token() + p164 = force_token() + p165 = getarrayitem_gc(p67, 0, descr=) + guard_value(p165, ConstPtr(ptr70), descr=...) + p166 = getfield_gc(p165, descr=) + guard_value(p166, ConstPtr(ptr72), descr=...) + p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) + guard_no_exception(descr=...) + i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 12, descr=) + setfield_gc(p167, 0, descr=) + setfield_gc(p167, ConstPtr(ptr86), descr=) + guard_no_exception(descr=...) 
+ i169 = int_add(i168, i97) + i170 = int_sub(i160, i106) + setfield_gc(p167, i168, descr=) + setfield_gc(p167, ConstPtr(ptr89), descr=) + i171 = uint_gt(i170, i108) + guard_false(i171, descr=...) + i172 = int_sub(i160, -2147483648) + i173 = int_and(i172, 4294967295) + i174 = int_add(i173, -2147483648) + setarrayitem_raw(i169, 0, i174, descr=) + i175 = int_add(i168, i121) + i176 = int_sub(i160, i130) + i177 = uint_gt(i176, i132) + guard_false(i177, descr=...) + setarrayitem_raw(i175, 0, i174, descr=) + i178 = int_add(i168, i140) + i179 = int_sub(i160, i149) + i180 = uint_gt(i179, i151) + guard_false(i180, descr=...) + setarrayitem_raw(i178, 0, i174, descr=) + --TICK-- + i183 = arraylen_gc(p67, descr=) + i184 = arraylen_gc(p92, descr=) + jump(..., descr=...) + """) From noreply at buildbot.pypy.org Sun Nov 10 10:12:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 10:12:24 +0100 (CET) Subject: [pypy-commit] cffi default: Issue #112: add demo/ in the distributed tar file. Message-ID: <20131110091224.483331C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1409:8dfdd2c265a1 Date: 2013-11-10 10:12 +0100 http://bitbucket.org/cffi/cffi/changeset/8dfdd2c265a1/ Log: Issue #112: add demo/ in the distributed tar file. 
diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,4 +2,5 @@ recursive-include c *.c *.h *.asm *.py recursive-include testing *.py recursive-include doc *.py *.rst Makefile *.bat +recursive-include demo py.cleanup *.py include LICENSE setup_base.py From noreply at buildbot.pypy.org Sun Nov 10 11:22:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 11:22:27 +0100 (CET) Subject: [pypy-commit] pypy default: Add protection against a rare, hard-to-reproduce bug that I believe cannot Message-ID: <20131110102227.B74111C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67925:37537ea3d3c6 Date: 2013-11-10 10:21 +0000 http://bitbucket.org/pypy/pypy/changeset/37537ea3d3c6/ Log: Add protection against a rare, hard-to-reproduce bug that I believe cannot occur in CPython's refcounting model. diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) From noreply at buildbot.pypy.org Sun Nov 10 12:00:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 12:00:40 +0100 (CET) Subject: [pypy-commit] cffi default: Issue #118: improve the detection and error message, jumping Message-ID: <20131110110040.E1DD31C2FED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1410:c3942c440199 Date: 2013-11-10 12:00 +0100 http://bitbucket.org/cffi/cffi/changeset/c3942c440199/ Log: Issue #118: improve the detection and error message, jumping through hoops to cover both signed and unsigned cases. diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -646,12 +646,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -422,11 +422,22 @@ prnt('int %s(char 
*out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1595,6 +1595,19 @@ ## assert ffi.sizeof("enum foo_e") == expected_size ## assert int(ffi.cast("enum foo_e", -1)) == expected_minus1 +def test_enum_bug118(): + maxulong = 256 ** FFI().sizeof("unsigned long") - 1 + for c1, c2, c2c in [(0xffffffff, -1, ''), + (maxulong, -1, ''), + (-1, 0xffffffff, 'U'), + (-1, maxulong, 'UL')]: + ffi = FFI() + ffi.cdef("enum foo_e { AA=%s };" % c1) + e = py.test.raises(VerificationError, ffi.verify, + "enum foo_e { AA=%s%s };" % (c2, c2c)) + assert str(e.value) == ('enum foo_e: AA has the real value %d, not %d' + % (c2, c1)) + def test_string_to_voidp_arg(): ffi = FFI() ffi.cdef("int myfunc(void *);") From noreply at buildbot.pypy.org Sun Nov 10 12:06:41 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 10 Nov 2013 12:06:41 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: non-gc arrays mustn't get a barrier (getarraysize) Message-ID: <20131110110641.B92411C2FED@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: 
r67926:eb21e739cefa Date: 2013-11-10 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/eb21e739cefa/ Log: non-gc arrays mustn't get a barrier (getarraysize) diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -28,6 +28,27 @@ assert len(self.writemode) == 0 assert self.barriers == ['I2R'] + def test_array_size(self): + array_gc = lltype.GcArray(('z', lltype.Signed)) + array_nongc = lltype.Array(('z', lltype.Signed)) + Q = lltype.GcStruct('Q', + ('gc', lltype.Ptr(array_gc)), + ('raw', lltype.Ptr(array_nongc))) + q = lltype.malloc(Q, immortal=True) + q.gc = lltype.malloc(array_gc, n=3, flavor='gc', immortal=True) + q.raw = lltype.malloc(array_nongc, n=5, flavor='raw', immortal=True) + def f1(n): + if n == 1: + return len(q.gc) + else: + return len(q.raw) + res = self.interpret(f1, [1]) + assert self.barriers == ['I2R', 'a2i'] + res = self.interpret(f1, [0]) + assert self.barriers == ['I2R'] + + + def test_simple_read_2(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) x2 = lltype.malloc(X, immortal=True) diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -85,7 +85,8 @@ # field even on a stub pass - elif op.opname in ('getarraysize', 'getinteriorarraysize'): + elif (op.opname in ('getarraysize', 'getinteriorarraysize') and + is_gc_ptr(op.args[0].concretetype)): # XXX: or (is_getter and is_immutable(op))): # we can't leave getarraysize or the immutable getfields # fully unmodified: we need at least immut_read_barrier From noreply at buildbot.pypy.org Sun Nov 10 13:20:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 13:20:35 +0100 (CET) Subject: [pypy-commit] cffi default: Issue #116: give out a warning when we're doing that 
Message-ID: <20131110122035.879D71C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1411:861bff9ef031 Date: 2013-11-10 13:20 +0100 http://bitbucket.org/cffi/cffi/changeset/861bff9ef031/ Log: Issue #116: give out a warning when we're doing that diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper @@ -754,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -877,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void 
_cffi_init(void) From noreply at buildbot.pypy.org Sun Nov 10 16:08:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:08:32 +0100 (CET) Subject: [pypy-commit] pypy default: These masks are not needed. The first is useless, and the 2nd one is Message-ID: <20131110150832.A07171C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67927:e66808b74918 Date: 2013-11-10 14:12 +0100 http://bitbucket.org/pypy/pypy/changeset/e66808b74918/ Log: These masks are not needed. The first is useless, and the 2nd one is done by setdigits() anyway. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -930,14 +930,12 @@ loshift = int_other % SHIFT hishift = SHIFT - loshift - lomask = (1 << hishift) - 1 - himask = MASK ^ lomask z = rbigint([NULLDIGIT] * newsize, self.sign, newsize) i = 0 while i < newsize: - newdigit = (self.digit(wordshift) >> loshift) & lomask + newdigit = (self.digit(wordshift) >> loshift) if i+1 < newsize: - newdigit |= (self.digit(wordshift+1) << hishift) & himask + newdigit |= (self.digit(wordshift+1) << hishift) z.setdigit(i, newdigit) i += 1 wordshift += 1 From noreply at buildbot.pypy.org Sun Nov 10 16:08:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:08:33 +0100 (CET) Subject: [pypy-commit] pypy default: Add a function to extract a (small) number of bits from a bigint. Message-ID: <20131110150833.CD72E1C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67928:6eaf1ec3939a Date: 2013-11-10 14:33 +0100 http://bitbucket.org/pypy/pypy/changeset/6eaf1ec3939a/ Log: Add a function to extract a (small) number of bits from a bigint. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -943,6 +943,21 @@ return z rshift._always_inline_ = 'try' # It's so fast that it's always benefitial. 
+ def abs_rshift_and_mask(self, bigshiftcount, mask): + assert type(bigshiftcount) is r_ulonglong + assert mask >= 0 + wordshift = bigshiftcount / SHIFT + numdigits = self.numdigits() + if wordshift >= numdigits: + return 0 + wordshift = intmask(wordshift) + loshift = intmask(intmask(bigshiftcount) - intmask(wordshift * SHIFT)) + lastdigit = self.digit(wordshift) >> loshift + if mask > (MASK >> loshift) and wordshift + 1 < numdigits: + hishift = SHIFT - loshift + lastdigit |= self.digit(wordshift+1) << hishift + return lastdigit & mask + @jit.elidable def and_(self, other): return _bitwise(self, '&', other) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -475,6 +475,7 @@ def test_shift(self): negative = -23 + masks_list = [int((1 << i) - 1) for i in range(1, r_uint.BITS-1)] for x in gen_signs([3L ** 30L, 5L ** 20L, 7 ** 300, 0L, 1L]): f1 = rbigint.fromlong(x) py.test.raises(ValueError, f1.lshift, negative) @@ -484,7 +485,10 @@ res2 = f1.rshift(int(y)).tolong() assert res1 == x << y assert res2 == x >> y - + for mask in masks_list: + res3 = f1.abs_rshift_and_mask(r_ulonglong(y), mask) + assert res3 == (abs(x) >> y) & mask + def test_bitwise(self): for x in gen_signs([0, 1, 5, 11, 42, 43, 3 ** 30]): for y in gen_signs([0, 1, 5, 11, 42, 43, 3 ** 30, 3 ** 31]): From noreply at buildbot.pypy.org Sun Nov 10 16:08:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:08:35 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the bogus complexity of marshal dumping "long" objects Message-ID: <20131110150835.012821C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67929:837dea310b4d Date: 2013-11-10 14:48 +0100 http://bitbucket.org/pypy/pypy/changeset/837dea310b4d/ Log: Fix the bogus complexity of marshal dumping "long" objects diff --git a/pypy/module/marshal/test/test_marshalimpl.py 
b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -56,3 +56,29 @@ class AppTestMarshalSmallLong(AppTestMarshalMore): spaceconfig = dict(usemodules=('array',), **{"objspace.std.withsmalllong": True}) + + +def test_long_more(space): + import marshal, struct + + class FakeM: + def __init__(self): + self.seen = [] + def start(self, code): + self.seen.append(code) + def put_int(self, value): + self.seen.append(struct.pack("i", value)) + def put_short(self, value): + self.seen.append(struct.pack("h", value)) + + def _marshal_check(x): + expected = marshal.dumps(long(x)) + w_obj = space.wraplong(x) + m = FakeM() + space.marshal_w(w_obj, m) + assert ''.join(m.seen) == expected + + for sign in [1L, -1L]: + for i in range(100): + _marshal_check(sign * ((1L << i) - 1L)) + _marshal_check(sign * (1L << i)) diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -207,20 +207,20 @@ def marshal_w__Long(space, w_long, m): from rpython.rlib.rbigint import rbigint + from rpython.rlib.rarithmetic import r_ulonglong m.start(TYPE_LONG) SHIFT = 15 MASK = (1 << SHIFT) - 1 num = w_long.num sign = num.sign num = num.abs() - ints = [] - while num.tobool(): - next = intmask(num.uintmask() & MASK) - ints.append(next) - num = num.rshift(SHIFT) - m.put_int(len(ints) * sign) - for i in ints: - m.put_short(i) + total_length = (num.bit_length() + (SHIFT - 1)) / SHIFT + m.put_int(total_length * sign) + bigshiftcount = r_ulonglong(0) + for i in range(total_length): + next = num.abs_rshift_and_mask(bigshiftcount, MASK) + m.put_short(next) + bigshiftcount += SHIFT def unmarshal_Long(space, u, tc): from rpython.rlib.rbigint import rbigint From noreply at buildbot.pypy.org Sun Nov 10 16:08:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:08:36 +0100 (CET) Subject: 
[pypy-commit] pypy default: Reverse: a way to build rbigint objects from digits given in base 2**n, Message-ID: <20131110150836.E20A71C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67930:196622fea620 Date: 2013-11-10 16:04 +0100 http://bitbucket.org/pypy/pypy/changeset/196622fea620/ Log: Reverse: a way to build rbigint objects from digits given in base 2**n, for any n <= SHIFT. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -943,6 +943,7 @@ return z rshift._always_inline_ = 'try' # It's so fast that it's always benefitial. + @jit.elidable def abs_rshift_and_mask(self, bigshiftcount, mask): assert type(bigshiftcount) is r_ulonglong assert mask >= 0 @@ -958,6 +959,39 @@ lastdigit |= self.digit(wordshift+1) << hishift return lastdigit & mask + @staticmethod + def from_list_n_bits(list, nbits): + if len(list) == 0: + return NULLRBIGINT + + if nbits == SHIFT: + z = rbigint(list, 1) + else: + if not (1 <= nbits < SHIFT): + raise ValueError + + lllength = (r_ulonglong(len(list)) * nbits) // SHIFT + length = intmask(lllength) + 1 + z = rbigint([NULLDIGIT] * length, 1) + + out = 0 + i = 0 + accum = 0 + for input in list: + accum |= (input << i) + original_i = i + i += nbits + if i > SHIFT: + z.setdigit(out, accum) + out += 1 + accum = input >> (SHIFT - original_i) + i -= SHIFT + assert out < length + z.setdigit(out, accum) + + z._normalize() + return z + @jit.elidable def and_(self, other): return _bitwise(self, '&', other) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -489,6 +489,20 @@ res3 = f1.abs_rshift_and_mask(r_ulonglong(y), mask) assert res3 == (abs(x) >> y) & mask + def test_from_list_n_bits(self): + for x in ([3L ** 30L, 5L ** 20L, 7 ** 300] + + [1L << i for i in range(130)] + + [(1L << i) - 1L for i in range(130)]): + for nbits in range(1, 
SHIFT+1): + mask = (1 << nbits) - 1 + lst = [] + got = x + while got > 0: + lst.append(int(got & mask)) + got >>= nbits + f1 = rbigint.from_list_n_bits(lst, nbits) + assert f1.tolong() == x + def test_bitwise(self): for x in gen_signs([0, 1, 5, 11, 42, 43, 3 ** 30]): for y in gen_signs([0, 1, 5, 11, 42, 43, 3 ** 30, 3 ** 31]): From noreply at buildbot.pypy.org Sun Nov 10 16:08:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:08:38 +0100 (CET) Subject: [pypy-commit] pypy default: Unmarshal longs using the new from_list_n_bits(). Message-ID: <20131110150838.6465A1C14BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67931:31ea0b839ffc Date: 2013-11-10 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/31ea0b839ffc/ Log: Unmarshal longs using the new from_list_n_bits(). diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -230,11 +230,8 @@ lng = -lng else: negative = False - SHIFT = 15 - result = rbigint.fromint(0) - for i in range(lng): - shift = i * SHIFT - result = result.or_(rbigint.fromint(u.get_short()).lshift(shift)) + digits = [u.get_short() for i in range(lng)] + result = rbigint.from_list_n_bits(digits, 15) if lng and not result.tobool(): raise_exception(space, 'bad marshal data') if negative: From noreply at buildbot.pypy.org Sun Nov 10 16:13:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:13:57 +0100 (CET) Subject: [pypy-commit] pypy default: Add unmarshalling tests here too, for longs. Message-ID: <20131110151357.B55101C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67932:b217875cc083 Date: 2013-11-10 16:12 +0100 http://bitbucket.org/pypy/pypy/changeset/b217875cc083/ Log: Add unmarshalling tests here too, for longs. 
diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -77,6 +77,10 @@ m = FakeM() space.marshal_w(w_obj, m) assert ''.join(m.seen) == expected + # + u = interp_marshal.StringUnmarshaller(space, space.wrap(expected)) + w_long = u.load_w_obj() + assert space.eq_w(w_long, w_obj) is True for sign in [1L, -1L]: for i in range(100): From noreply at buildbot.pypy.org Sun Nov 10 16:45:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 16:45:33 +0100 (CET) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20131110154533.45C711C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67933:c23cbc5014a9 Date: 2013-11-10 16:44 +0100 http://bitbucket.org/pypy/pypy/changeset/c23cbc5014a9/ Log: Translation fix diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -945,7 +945,7 @@ @jit.elidable def abs_rshift_and_mask(self, bigshiftcount, mask): - assert type(bigshiftcount) is r_ulonglong + assert isinstance(bigshiftcount, r_ulonglong) assert mask >= 0 wordshift = bigshiftcount / SHIFT numdigits = self.numdigits() From noreply at buildbot.pypy.org Sun Nov 10 19:19:02 2013 From: noreply at buildbot.pypy.org (timfel) Date: Sun, 10 Nov 2013 19:19:02 +0100 (CET) Subject: [pypy-commit] lang-scheme default: fix compile on windows, add nicer error when file not found Message-ID: <20131110181902.D0C091C0144@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r45:b1d5a1b8744f Date: 2013-11-10 19:15 +0100 http://bitbucket.org/pypy/lang-scheme/changeset/b1d5a1b8744f/ Log: fix compile on windows, add nicer error when file not found diff --git a/scheme/execution.py b/scheme/execution.py --- a/scheme/execution.py +++ b/scheme/execution.py @@ -137,4 +137,3 @@ return loc return None - diff --git 
a/scheme/targetscheme.py b/scheme/targetscheme.py --- a/scheme/targetscheme.py +++ b/scheme/targetscheme.py @@ -2,6 +2,7 @@ A simple standalone target for the scheme interpreter. """ +import os import sys from rpython.rlib.streamio import open_file_as_stream from rpython.rlib.parsing.makepackrat import BacktrackException @@ -14,13 +15,23 @@ def entry_point(argv): if len(argv) == 2: - code = open_file_as_stream(argv[1]).readall() + path = argv[1] + try: + f = open_file_as_stream(path, buffering=0) + except OSError as e: + os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) + return 1 + try: + code = f.readall() + finally: + f.close() + try: t = parse(code) except BacktrackException, e: (line, col) = e.error.get_line_column(code) #expected = " ".join(e.error.expected) - print "parse error in line %d, column %d" % (line, col) + os.write(2, "parse error in line %d, column %d" % (line, col)) return 1 #this should not be necessary here @@ -51,4 +62,3 @@ if __name__ == '__main__': entry_point(sys.argv) - From noreply at buildbot.pypy.org Sun Nov 10 23:08:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 23:08:46 +0100 (CET) Subject: [pypy-commit] pypy default: Issue1627. Test and fix: it's a bit strange, but we need to follow Message-ID: <20131110220846.A8BE71C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67934:1dc9140b7f44 Date: 2013-11-10 23:08 +0100 http://bitbucket.org/pypy/pypy/changeset/1dc9140b7f44/ Log: Issue1627. Test and fix: it's a bit strange, but we need to follow CPython's lead here and return a string as long as possible, and a unicode only when asked to. 
diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/pypy/module/test_lib_pypy/test_json_extra.py b/pypy/module/test_lib_pypy/test_json_extra.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_json_extra.py @@ -0,0 +1,14 @@ +import py, json + +def is_(x, y): + return type(x) is type(y) and x == y + +def test_no_ensure_ascii(): + assert is_(json.dumps(u"\u1234", ensure_ascii=False), u'"\u1234"') + assert is_(json.dumps("\xc0", ensure_ascii=False), '"\xc0"') + e = py.test.raises(UnicodeDecodeError, json.dumps, + (u"\u1234", "\xc0"), ensure_ascii=False) + assert str(e.value).startswith("'ascii' codec can't decode byte 0xc0 ") + e = py.test.raises(UnicodeDecodeError, json.dumps, + ("\xc0", u"\u1234"), ensure_ascii=False) + assert str(e.value).startswith("'ascii' codec can't decode byte 0xc0 ") From noreply at buildbot.pypy.org Sun Nov 10 23:31:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Nov 2013 23:31:55 +0100 (CET) Subject: [pypy-commit] pypy default: Kill a few lines of very old outdated doc. 
Message-ID: <20131110223155.B17471C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67935:50b1613bc5b9 Date: 2013-11-10 23:31 +0100 http://bitbucket.org/pypy/pypy/changeset/50b1613bc5b9/ Log: Kill a few lines of very old outdated doc. diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -137,7 +137,8 @@ control flow of a function (such as ``while`` and ``try`` constructs) - a value stack where bytecode interpretation pulls object - from and puts results on. + from and puts results on. (``locals_stack_w`` is actually a single + list containing both the local scope and the value stack.) - a reference to the *globals* dictionary, containing module-level name-value bindings @@ -151,10 +152,7 @@ - the class ``PyFrame`` is defined in `pypy/interpreter/pyframe.py`_. -- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcode. - -- nested scope support is added to the ``PyFrame`` class in - `pypy/interpreter/nestedscope.py`_. +- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcodes. .. _Code: @@ -184,12 +182,6 @@ * ``co_name`` name of the code object (often the function name) * ``co_lnotab`` a helper table to compute the line-numbers corresponding to bytecodes -In PyPy, code objects also have the responsibility of creating their Frame_ objects -via the `'create_frame()`` method. With proper parser and compiler support this would -allow to create custom Frame objects extending the execution of functions -in various ways. The several Frame_ classes already utilize this flexibility -in order to implement Generators and Nested Scopes. - .. _Function: Function and Method classes From noreply at buildbot.pypy.org Mon Nov 11 00:14:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 00:14:43 +0100 (CET) Subject: [pypy-commit] pypy default: Issue1491. 
Test and fix: when CSI escape sequences are given in the Message-ID: <20131110231443.F00321C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67936:b1648711d9dc Date: 2013-11-11 00:14 +0100 http://bitbucket.org/pypy/pypy/changeset/b1648711d9dc/ Log: Issue1491. Test and fix: when CSI escape sequences are given in the prompts for raw_input(), even if they are not within \x01..\x02, then detect and ignore them for computing the printed length of the string. diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -19,11 +19,13 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -import types +import types, re from pyrepl import unicodedata_ from pyrepl import commands from pyrepl import input +_r_csi_seq = re.compile(r"\033\[[ -@]*[A-~]") + def _make_unctrl_map(): uc_map = {} for c in map(unichr, range(256)): @@ -309,6 +311,10 @@ excluded from the length calculation. So also a copy of the prompt is returned with these control characters removed. """ + # The logic below also ignores the length of common escape + # sequences if they were not explicitly within \x01...\x02. + # They are CSI (or ANSI) sequences ( ESC [ ... 
LETTER ) + out_prompt = '' l = len(prompt) pos = 0 @@ -321,9 +327,13 @@ break # Found start and end brackets, subtract from string length l = l - (e-s+1) - out_prompt += prompt[pos:s] + prompt[s+1:e] + keep = prompt[pos:s] + l -= sum(map(len, _r_csi_seq.findall(keep))) + out_prompt += keep + prompt[s+1:e] pos = e+1 - out_prompt += prompt[pos:] + keep = prompt[pos:] + l -= sum(map(len, _r_csi_seq.findall(keep))) + out_prompt += keep return out_prompt, l def bow(self, p=None): diff --git a/pypy/module/test_lib_pypy/pyrepl/test_reader.py b/pypy/module/test_lib_pypy/pyrepl/test_reader.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/pyrepl/test_reader.py @@ -0,0 +1,9 @@ + +def test_process_prompt(): + from pyrepl.reader import Reader + r = Reader(None) + assert r.process_prompt("hi!") == ("hi!", 3) + assert r.process_prompt("h\x01i\x02!") == ("hi!", 2) + assert r.process_prompt("hi\033[11m!") == ("hi\033[11m!", 3) + assert r.process_prompt("h\x01i\033[11m!\x02") == ("hi\033[11m!", 1) + assert r.process_prompt("h\033[11m\x01i\x02!") == ("h\033[11mi!", 2) From noreply at buildbot.pypy.org Mon Nov 11 00:18:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 00:18:00 +0100 (CET) Subject: [pypy-commit] pypy default: CPython compat: accept (but ignore) an argument to gc.collect(). Message-ID: <20131110231800.1D7EF1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67937:7fdbc8f2860e Date: 2013-11-11 00:17 +0100 http://bitbucket.org/pypy/pypy/changeset/7fdbc8f2860e/ Log: CPython compat: accept (but ignore) an argument to gc.collect(). diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -3,8 +3,9 @@ from rpython.rlib import rgc from rpython.rlib.streamio import open_file_as_stream -def collect(space): - "Run a full collection." + at unwrap_spec(generation=int) +def collect(space, generation=0): + "Run a full collection. 
The optional argument is ignored." # First clear the method cache. See test_gc for an example of why. if space.config.objspace.std.withmethodcache: from pypy.objspace.std.typeobject import MethodCache diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -4,6 +4,7 @@ def test_collect(self): import gc gc.collect() # mostly a "does not crash" kind of test + gc.collect(0) # mostly a "does not crash" kind of test def test_disable_finalizers(self): import gc From noreply at buildbot.pypy.org Mon Nov 11 04:17:12 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:17:12 +0100 (CET) Subject: [pypy-commit] pypy default: fix a numpy complex zero division case Message-ID: <20131111031712.48D561C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67938:7acb38913282 Date: 2013-11-10 20:11 -0500 http://bitbucket.org/pypy/pypy/changeset/7acb38913282/ Log: fix a numpy complex zero division case diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -1,6 +1,7 @@ from _numpypy.multiarray import * from _numpypy.umath import * +inf = float('inf') nan = float('nan') newaxis = None ufunc = type(sin) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -96,3 +96,20 @@ assert str(np.complex128(complex(1, float('-nan')))) == '(1+nan*j)' assert str(np.complex128(complex(1, float('inf')))) == '(1+inf*j)' assert str(np.complex128(complex(1, float('-inf')))) == '(1-inf*j)' + + def test_complex_zero_division(self): + import numpy as np + for t in [np.complex64, np.complex128]: + a = t(0.0) + b = t(1.0) + assert np.isinf(b/a) + b = t(complex(np.inf, 
np.inf)) + assert np.isinf(b/a) + b = t(complex(np.inf, np.nan)) + assert np.isinf(b/a) + b = t(complex(np.nan, np.inf)) + assert np.isinf(b/a) + b = t(complex(np.nan, np.nan)) + assert np.isnan(b/a) + b = t(0.) + assert np.isnan(b/a) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1132,7 +1132,8 @@ try: return rcomplex.c_div(v1, v2) except ZeroDivisionError: - if rcomplex.c_abs(*v1) == 0: + if rcomplex.c_abs(*v1) == 0 or \ + (rfloat.isnan(v1[0]) and rfloat.isnan(v1[1])): return rfloat.NAN, rfloat.NAN return rfloat.INFINITY, rfloat.INFINITY From noreply at buildbot.pypy.org Mon Nov 11 04:17:13 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:17:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix a complex scalar str_format case Message-ID: <20131111031713.99EFF1C0144@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67939:9ed2dd8cdfe8 Date: 2013-11-10 20:44 -0500 http://bitbucket.org/pypy/pypy/changeset/9ed2dd8cdfe8/ Log: fix a complex scalar str_format case diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -92,10 +92,15 @@ def test_complex_str_format(self): import numpy as np - assert str(np.complex128(complex(1, float('nan')))) == '(1+nan*j)' - assert str(np.complex128(complex(1, float('-nan')))) == '(1+nan*j)' - assert str(np.complex128(complex(1, float('inf')))) == '(1+inf*j)' - assert str(np.complex128(complex(1, float('-inf')))) == '(1-inf*j)' + for t in [np.complex64, np.complex128]: + assert str(t(complex(1, float('nan')))) == '(1+nan*j)' + assert str(t(complex(1, float('-nan')))) == '(1+nan*j)' + assert str(t(complex(1, float('inf')))) == '(1+inf*j)' + assert str(t(complex(1, float('-inf')))) == '(1-inf*j)' + for x in [0, 1, -1]: + assert 
str(t(complex(x))) == str(complex(x)) + assert str(t(x*1j)) == str(complex(x*1j)) + assert str(t(x + x*1j)) == str(complex(x + x*1j)) def test_complex_zero_division(self): import numpy as np diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1030,7 +1030,7 @@ imag_str += 'j' # (0+2j) => 2j - if real == 0: + if real == 0 and math.copysign(1, real) == 1: return imag_str real_str = str_format(real) From noreply at buildbot.pypy.org Mon Nov 11 04:17:14 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:17:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix a numpy scalar indexing case Message-ID: <20131111031714.D61D21C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67940:99f9d0c4eb89 Date: 2013-11-10 21:30 -0500 http://bitbucket.org/pypy/pypy/changeset/99f9d0c4eb89/ Log: fix a numpy scalar indexing case diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -126,6 +126,15 @@ def item(self, space): return self.get_dtype(space).itemtype.to_builtin_type(space, self) + def descr_getitem(self, space, w_item): + from pypy.module.micronumpy.base import convert_to_array + if space.is_w(w_item, space.w_Ellipsis) or \ + (space.isinstance_w(w_item, space.w_tuple) and + space.len_w(w_item) == 0): + return convert_to_array(space, self) + raise OperationError(space.w_IndexError, space.wrap( + "invalid index to scalar variable")) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -467,6 +476,7 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), + __getitem__ = interp2app(W_GenericBox.descr_getitem), __str__ = interp2app(W_GenericBox.descr_str), __repr__ = interp2app(W_GenericBox.descr_str), __format__ = 
interp2app(W_GenericBox.descr_format), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -83,6 +83,15 @@ assert value.ndim == 0 assert value.T is value + def test_indexing(self): + import numpy as np + v = np.int32(2) + for b in [v[()], v[...]]: + assert isinstance(b, np.ndarray) + assert b.shape == () + assert b == v + raises(IndexError, "v['blah']") + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: From noreply at buildbot.pypy.org Mon Nov 11 04:17:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:17:16 +0100 (CET) Subject: [pypy-commit] pypy default: provide len for numpy dtypes Message-ID: <20131111031716.2CFEE1C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67941:8e73b35beb24 Date: 2013-11-10 21:42 -0500 http://bitbucket.org/pypy/pypy/changeset/8e73b35beb24/ Log: provide len for numpy dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -254,6 +254,11 @@ raise OperationError(space.w_KeyError, space.wrap( "Field named '%s' not found." 
% item)) + def descr_len(self, space): + if self.fields is None: + return space.wrap(0) + return space.wrap(len(self.fields)) + def descr_reduce(self, space): w_class = space.type(self) @@ -397,6 +402,7 @@ __eq__ = interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), __getitem__ = interp2app(W_Dtype.descr_getitem), + __len__ = interp2app(W_Dtype.descr_len), __reduce__ = interp2app(W_Dtype.descr_reduce), __setstate__ = interp2app(W_Dtype.descr_setstate), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -221,6 +221,13 @@ for i in range(5): assert b[i] == i * 2 + def test_len(self): + import numpy as np + d = np.dtype('int32') + assert len(d) == 0 + d = np.dtype([('x', 'i4'), ('y', 'i4')]) + assert len(d) == 2 + def test_shape(self): from numpypy import dtype assert dtype(long).shape == () From noreply at buildbot.pypy.org Mon Nov 11 04:17:17 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:17:17 +0100 (CET) Subject: [pypy-commit] pypy default: link the ndarray.{resize, squeeze} stubs Message-ID: <20131111031717.9983B1C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67942:808f15933ea2 Date: 2013-11-10 21:48 -0500 http://bitbucket.org/pypy/pypy/changeset/808f15933ea2/ Log: link the ndarray.{resize,squeeze} stubs diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -640,7 +640,8 @@ def descr_put(self, space, w_indices, w_values, w_mode=None): put(space, self, w_indices, w_values, w_mode) - def descr_resize(self, space, w_new_shape, w_refcheck=True): + @unwrap_spec(w_refcheck=WrappedDefault(True)) + def descr_resize(self, space, w_new_shape, w_refcheck=None): raise OperationError(space.w_NotImplementedError, 
space.wrap( "resize not implemented yet")) @@ -692,7 +693,7 @@ return return self.implementation.sort(space, w_axis, w_order) - def descr_squeeze(self, space): + def descr_squeeze(self, space, w_axis=None): raise OperationError(space.w_NotImplementedError, space.wrap( "squeeze not implemented yet")) @@ -1218,6 +1219,8 @@ copy = interp2app(W_NDimArray.descr_copy), reshape = interp2app(W_NDimArray.descr_reshape), + resize = interp2app(W_NDimArray.descr_resize), + squeeze = interp2app(W_NDimArray.descr_squeeze), T = GetSetProperty(W_NDimArray.descr_get_transpose), transpose = interp2app(W_NDimArray.descr_transpose), tolist = interp2app(W_NDimArray.descr_tolist), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1713,6 +1713,19 @@ 4, 4]]).all() assert (array([1, 2]).repeat(2) == array([1, 1, 2, 2])).all() + def test_resize(self): + import numpy as np + a = np.array([1,2,3]) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.resize, ()) + + def test_squeeze(self): + import numpy as np + a = np.array([1,2,3]) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.squeeze) def test_swapaxes(self): from numpypy import array From noreply at buildbot.pypy.org Mon Nov 11 04:17:18 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:17:18 +0100 (CET) Subject: [pypy-commit] pypy default: provide ndarray.squeeze Message-ID: <20131111031718.D85B81C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67943:00ec1be89de6 Date: 2013-11-10 22:14 -0500 http://bitbucket.org/pypy/pypy/changeset/00ec1be89de6/ Log: provide ndarray.squeeze diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ 
b/pypy/module/micronumpy/interp_numarray.py @@ -694,8 +694,16 @@ return self.implementation.sort(space, w_axis, w_order) def descr_squeeze(self, space, w_axis=None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "squeeze not implemented yet")) + if not space.is_none(w_axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + "axis unsupported for squeeze")) + cur_shape = self.get_shape() + new_shape = [s for s in cur_shape if s != 1] + if len(cur_shape) == len(new_shape): + return self + return wrap_impl(space, space.type(self), self, + self.implementation.get_view( + self, self.get_dtype(), new_shape)) def descr_strides(self, space): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -705,7 +713,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "tofile not implemented yet")) - def descr_view(self, space, w_dtype=None, w_type=None) : + def descr_view(self, space, w_dtype=None, w_type=None): if not w_type and w_dtype: try: if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1723,9 +1723,13 @@ def test_squeeze(self): import numpy as np a = np.array([1,2,3]) - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.squeeze) + assert a.squeeze() is a + a = np.array([[1,2,3]]) + b = a.squeeze() + assert b.shape == (3,) + assert (b == a).all() + b[1] = -1 + assert a[0][1] == -1 def test_swapaxes(self): from numpypy import array From noreply at buildbot.pypy.org Mon Nov 11 04:49:24 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 04:49:24 +0100 (CET) Subject: [pypy-commit] pypy default: provide copy for numpy scalars Message-ID: <20131111034924.AB2581C0162@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: 
Changeset: r67944:a6bcf5d26ccd Date: 2013-11-10 22:41 -0500 http://bitbucket.org/pypy/pypy/changeset/a6bcf5d26ccd/ Log: provide copy for numpy scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -289,6 +289,9 @@ def descr_get_ndim(self, space): return space.wrap(0) + def descr_copy(self, space): + return self.convert_to(self.get_dtype(space)) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") @@ -538,6 +541,7 @@ astype = interp2app(W_GenericBox.descr_astype), view = interp2app(W_GenericBox.descr_view), squeeze = interp2app(W_GenericBox.descr_self), + copy = interp2app(W_GenericBox.descr_copy), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -65,6 +65,14 @@ assert type(a) is np.int32 assert a == 1 + def test_copy(self): + import numpy as np + a = np.int32(2) + b = a.copy() + assert type(b) is type(a) + assert b == a + assert b is not a + def test_squeeze(self): import numpy as np assert np.True_.squeeze() is np.True_ From noreply at buildbot.pypy.org Mon Nov 11 09:09:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 09:09:46 +0100 (CET) Subject: [pypy-commit] pypy default: Wait longer in this test Message-ID: <20131111080946.7E7D01C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67945:6f034f0a5e8c Date: 2013-11-11 09:08 +0100 http://bitbucket.org/pypy/pypy/changeset/6f034f0a5e8c/ Log: Wait longer in this test diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ 
b/pypy/module/__pypy__/test/test_signal.py @@ -56,7 +56,7 @@ interrupted = [] print('--- start ---') thread.start_new_thread(subthread, ()) - for j in range(10): + for j in range(30): if len(done): break print('.') time.sleep(0.25) From noreply at buildbot.pypy.org Mon Nov 11 09:09:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 09:09:47 +0100 (CET) Subject: [pypy-commit] pypy default: Fix on 32-bit Message-ID: <20131111080947.BCA7D1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67946:64c45ff6bf6c Date: 2013-11-11 09:09 +0100 http://bitbucket.org/pypy/pypy/changeset/64c45ff6bf6c/ Log: Fix on 32-bit diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -316,9 +316,9 @@ ffi = cffi.FFI() ffi.cdef(""" struct s { - int x; - int y; - int z; + short x; + short y; + short z; }; """) @@ -339,9 +339,9 @@ guard_value(p165, ConstPtr(ptr70), descr=...) p166 = getfield_gc(p165, descr=) guard_value(p166, ConstPtr(ptr72), descr=...) - p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) + p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) - i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 12, descr=) + i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) setfield_gc(p167, 0, descr=) setfield_gc(p167, ConstPtr(ptr86), descr=) guard_no_exception(descr=...) @@ -351,20 +351,20 @@ setfield_gc(p167, ConstPtr(ptr89), descr=) i171 = uint_gt(i170, i108) guard_false(i171, descr=...) 
- i172 = int_sub(i160, -2147483648) - i173 = int_and(i172, 4294967295) - i174 = int_add(i173, -2147483648) - setarrayitem_raw(i169, 0, i174, descr=) + i172 = int_sub(i160, -32768) + i173 = int_and(i172, 65535) + i174 = int_add(i173, -32768) + setarrayitem_raw(i169, 0, i174, descr=) i175 = int_add(i168, i121) i176 = int_sub(i160, i130) i177 = uint_gt(i176, i132) guard_false(i177, descr=...) - setarrayitem_raw(i175, 0, i174, descr=) + setarrayitem_raw(i175, 0, i174, descr=) i178 = int_add(i168, i140) i179 = int_sub(i160, i149) i180 = uint_gt(i179, i151) guard_false(i180, descr=...) - setarrayitem_raw(i178, 0, i174, descr=) + setarrayitem_raw(i178, 0, i174, descr=) --TICK-- i183 = arraylen_gc(p67, descr=) i184 = arraylen_gc(p92, descr=) From noreply at buildbot.pypy.org Mon Nov 11 09:11:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 09:11:24 +0100 (CET) Subject: [pypy-commit] pypy default: Randomly fix the numbers to make the test pass Message-ID: <20131111081124.1C77F1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67947:d3423e1d4d2e Date: 2013-11-11 09:10 +0100 http://bitbucket.org/pypy/pypy/changeset/d3423e1d4d2e/ Log: Randomly fix the numbers to make the test pass diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -73,9 +73,9 @@ bridge = bridges.get(mangle_descr(op.descr)) if bridge is not None: mod_bridges.append(bridge) - assert len(mod_bridges) in (1, 2) + assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 From noreply at buildbot.pypy.org Mon Nov 11 10:13:23 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 
2013 10:13:23 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: update requirements Message-ID: <20131111091323.6CFD51C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: add-header-to-nightly Changeset: r880:49bca4151002 Date: 2013-11-10 19:33 +0100 http://bitbucket.org/pypy/buildbot/changeset/49bca4151002/ Log: update requirements diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ buildbot-slave==0.8.6p1 decorator==3.4.0 mock==1.0.1 -py==1.4.9 +py==1.4.18 pytest==2.2.4 python-dateutil==1.5 sqlalchemy-migrate==0.7.2 From noreply at buildbot.pypy.org Mon Nov 11 10:13:25 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 10:13:25 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: make first column left-aligned Message-ID: <20131111091325.ABFF61C04FF@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: add-header-to-nightly Changeset: r881:74576f8614cf Date: 2013-11-10 19:36 +0100 http://bitbucket.org/pypy/buildbot/changeset/74576f8614cf/ Log: make first column left-aligned diff --git a/master/templates/directory.html b/master/templates/directory.html --- a/master/templates/directory.html +++ b/master/templates/directory.html @@ -27,6 +27,7 @@ body { border: 0; padding: 0; margin: 0; background-color: #efefef; } td,th {padding-left: 0.5em; padding-right: 0.5em; } +td:first-child {text-align: left;} From noreply at buildbot.pypy.org Mon Nov 11 10:13:26 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 10:13:26 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: fix Message-ID: <20131111091326.C250C1C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: add-header-to-nightly Changeset: r882:b2f717734873 Date: 2013-11-10 19:37 +0100 http://bitbucket.org/pypy/buildbot/changeset/b2f717734873/ Log: fix diff --git a/master/templates/directory.html b/master/templates/directory.html --- 
a/master/templates/directory.html +++ b/master/templates/directory.html @@ -41,7 +41,7 @@
Filename Sizeown tests applevel tests
DirectorySizeDate
{{ d.text }}{{ d.size }}{{ d.type }}{{ d.encoding }}{{ d.size}}{{ d.date}}
-{% if files|length > 1 %} +{% if files|length > 0 %} @@ -59,7 +59,7 @@ {% endif %} -{% for d in directories %} +{% for d in directories %} From noreply at buildbot.pypy.org Mon Nov 11 10:13:28 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 10:13:28 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: put numpy builder on tannit64 Message-ID: <20131111091328.024AE1C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: numpy-tests Changeset: r883:48f642fb266d Date: 2013-11-10 22:47 +0100 http://bitbucket.org/pypy/buildbot/changeset/48f642fb266d/ Log: put numpy builder on tannit64 diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -437,10 +437,11 @@ 'category': 'openindiana32', }, {'name': NUMPY_64, - 'slavenames': ['numpy64'], + 'slavenames': ["tannit64"], 'builddir': NUMPY_64, 'factory': pypyNumpyCompatability, 'category': 'numpy', + 'locks': [TannitCPU.access('counting')], }, {'name': PYPYBUILDBOT, 'slavenames': ['cobra'], From noreply at buildbot.pypy.org Mon Nov 11 10:13:29 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 10:13:29 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: close about to be merge branch Message-ID: <20131111091329.037FC1C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: numpy-tests Changeset: r884:817b7b7df378 Date: 2013-11-11 10:12 +0100 http://bitbucket.org/pypy/buildbot/changeset/817b7b7df378/ Log: close about to be merge branch From noreply at buildbot.pypy.org Mon Nov 11 10:13:30 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 10:13:30 +0100 (CET) Subject: [pypy-commit] buildbot default: merge numpy-tests Message-ID: <20131111091330.092111C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r885:dab7ebb12eae Date: 2013-11-11 10:12 +0100 http://bitbucket.org/pypy/buildbot/changeset/dab7ebb12eae/ Log: merge 
numpy-tests diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -1,4 +1,5 @@ from buildbot.steps.source.mercurial import Mercurial +from buildbot.steps.source.git import Git from buildbot.process.buildstep import BuildStep from buildbot.process import factory from buildbot.steps import shell, transfer @@ -85,7 +86,7 @@ properties = self.build.getProperties() branch = map_branch_name(properties['branch']) - revision = properties['final_file_name'] + revision = properties.getProperty('final_file_name') mastersrc = os.path.expanduser(self.mastersrc) if branch.startswith('/'): @@ -185,6 +186,31 @@ # XXX in general it would be nice to drop the revision-number using only the # changeset-id for got_revision and final_file_name and sorting the builds # chronologically + +class UpdateGitCheckout(ShellCmd): + description = 'git checkout' + command = 'UNKNOWN' + + def __init__(self, workdir=None, haltOnFailure=True, force_branch=None, + **kwargs): + ShellCmd.__init__(self, workdir=workdir, haltOnFailure=haltOnFailure, + **kwargs) + self.force_branch = force_branch + self.addFactoryArguments(force_branch=force_branch) + + def start(self): + if self.force_branch is not None: + branch = self.force_branch + # Note: We could add a warning to the output if we + # ignore the branch set by the user. 
+ else: + properties = self.build.getProperties() + branch = properties['branch'] or 'default' + command = ["git", "checkout", "-f", branch] + self.setCommand(command) + ShellCmd.start(self) + + class CheckGotRevision(ShellCmd): description = 'got_revision' command = ['hg', 'parents', '--template', 'got_revision:{rev}:{node}'] @@ -300,6 +326,15 @@ workdir=workdir, logEnviron=False)) +def update_git(platform, factory, repourl, workdir, use_branch, + force_branch=None): + factory.addStep( + Git( + repourl=repourl, + mode='full', + method='fresh', + workdir=workdir, + logEnviron=False)) def setup_steps(platform, factory, workdir=None, repourl='https://bitbucket.org/pypy/pypy/', @@ -607,20 +642,6 @@ locks=[lock.access('counting')], ) ) - if host == 'tannit': - pypy_c_rel = 'build/pypy/goal/pypy-c' - self.addStep(ShellCmd( - env={'PYTHONPATH': './benchmarks/lib/jinja2'}, - description="measure numpy compatibility", - command=[pypy_c_rel, - 'build/pypy/module/micronumpy/tool/numready/', - pypy_c_rel, 'numpy-compat.html'], - workdir=".")) - resfile = os.path.expanduser("~/numpy_compat/%(got_revision)s.html") - self.addStep(NumpyStatusUpload( - slavesrc="numpy-compat.html", - masterdest=WithProperties(resfile), - workdir=".")) pypy_c_rel = "../build/pypy/goal/pypy-c" self.addStep(ShellCmd( # this step needs exclusive access to the CPU @@ -758,3 +779,93 @@ "--resultlog=testrun.log", ], logfiles={'pytestLog': 'testrun.log'})) + + +class NativeNumpyTests(factory.BuildFactory): + ''' + Download a pypy nightly, install nose and numpy, and run the numpy test suite + ''' + def __init__(self, platform='linux', + app_tests=False, + host = 'tannit', + lib_python=False, + pypyjit=True, + prefix=None, + translationArgs=[] + ): + factory.BuildFactory.__init__(self) + + self.addStep(ParseRevision(hideStepIf=ParseRevision.hideStepIf, + doStepIf=ParseRevision.doStepIf)) + # download corresponding nightly build + self.addStep(ShellCmd( + description="Clear pypy-c", + command=['rm', '-rf', 
'pypy-c'], + workdir='.')) + extension = get_extension(platform) + name = build_name(platform, pypyjit, translationArgs, placeholder='%(final_file_name)s') + extension + self.addStep(PyPyDownload( + basename=name, + mastersrc='~/nightly', + slavedest='pypy_build' + extension, + workdir='pypy-c')) + + # extract downloaded file + if platform.startswith('win'): + raise NotImplementedError + else: + self.addStep(ShellCmd( + description="decompress pypy-c", + command=['tar', '--extract', '--file=pypy_build'+ extension, '--strip-components=1', '--directory=.'], + workdir='pypy-c', + haltOnFailure=True, + )) + + # virtualenv the download + self.addStep(ShellCmd( + description="create virtualenv", + command=['virtualenv','-p', 'pypy-c/bin/pypy', 'install'], + workdir='./', + haltOnFailure=True, + )) + + self.addStep(ShellCmd( + description="install nose", + command=['install/bin/pip', 'install','nose'], + workdir='./', + haltOnFailure=True, + )) + + # obtain a pypy-compatible branch of numpy + numpy_url = 'https://www.bitbucket.org/pypy/numpy' + numpy_pypy_branch = 'pypy-compat' + update_git(platform, self, numpy_url, 'numpy_src', use_branch=True, + force_branch=numpy_pypy_branch) + + self.addStep(ShellCmd( + description="install numpy", + command=['../install/bin/python', 'setup.py','install'], + workdir='numpy_src')) + + self.addStep(ShellCmd( + description="test numpy", + command=['bin/nosetests', 'site-packages/numpy', + ], + #logfiles={'pytestLog': 'pytest-numpy.log'}, + timeout=4000, + workdir='install', + #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? 
+ )) + if host == 'tannit': + pypy_c_rel = 'install/bin/python' + self.addStep(ShellCmd( + description="measure numpy compatibility", + command=[pypy_c_rel, + 'numpy_src/tools/numready/', + pypy_c_rel, 'numpy-compat.html'], + workdir=".")) + resfile = os.path.expanduser("~/numpy_compat/%(got_revision)s.html") + self.addStep(NumpyStatusUpload( + slavesrc="numpy-compat.html", + masterdest=WithProperties(resfile), + workdir=".")) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -148,6 +148,8 @@ pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', postfix='-64') +pypyNumpyCompatability = pypybuilds.NativeNumpyTests(platform='linux64') + # LINUX32 = "own-linux-x86-32" @@ -180,7 +182,7 @@ JITBENCH64 = "jit-benchmark-linux-x86-64" JITBENCH64_2 = 'jit-benchmark-linux-x86-64-2' CPYTHON_64 = "cpython-2-benchmark-x86-64" - +NUMPY_64 = "numpy-compatability-linux-x86-64" # buildbot builder PYPYBUILDBOT = 'pypy-buildbot' @@ -264,6 +266,7 @@ JITONLYLINUXPPC64, JITBENCH, JITBENCH64, + NUMPY_64, ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, @@ -433,6 +436,13 @@ 'factory': pypyOwnTestFactoryIndiana, 'category': 'openindiana32', }, + {'name': NUMPY_64, + 'slavenames': ["tannit64"], + 'builddir': NUMPY_64, + 'factory': pypyNumpyCompatability, + 'category': 'numpy', + 'locks': [TannitCPU.access('counting')], + }, {'name': PYPYBUILDBOT, 'slavenames': ['cobra'], 'builddir': PYPYBUILDBOT, From noreply at buildbot.pypy.org Mon Nov 11 10:13:31 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 10:13:31 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: merge default Message-ID: <20131111091331.28BA21C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: add-header-to-nightly Changeset: r886:cb7170dc829e Date: 2013-11-11 10:13 +0100 http://bitbucket.org/pypy/buildbot/changeset/cb7170dc829e/ Log: merge default diff 
--git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -1,4 +1,5 @@ from buildbot.steps.source.mercurial import Mercurial +from buildbot.steps.source.git import Git from buildbot.process.buildstep import BuildStep from buildbot.process import factory from buildbot.steps import shell, transfer @@ -85,7 +86,7 @@ properties = self.build.getProperties() branch = map_branch_name(properties['branch']) - revision = properties['final_file_name'] + revision = properties.getProperty('final_file_name') mastersrc = os.path.expanduser(self.mastersrc) if branch.startswith('/'): @@ -185,6 +186,31 @@ # XXX in general it would be nice to drop the revision-number using only the # changeset-id for got_revision and final_file_name and sorting the builds # chronologically + +class UpdateGitCheckout(ShellCmd): + description = 'git checkout' + command = 'UNKNOWN' + + def __init__(self, workdir=None, haltOnFailure=True, force_branch=None, + **kwargs): + ShellCmd.__init__(self, workdir=workdir, haltOnFailure=haltOnFailure, + **kwargs) + self.force_branch = force_branch + self.addFactoryArguments(force_branch=force_branch) + + def start(self): + if self.force_branch is not None: + branch = self.force_branch + # Note: We could add a warning to the output if we + # ignore the branch set by the user. 
+ else: + properties = self.build.getProperties() + branch = properties['branch'] or 'default' + command = ["git", "checkout", "-f", branch] + self.setCommand(command) + ShellCmd.start(self) + + class CheckGotRevision(ShellCmd): description = 'got_revision' command = ['hg', 'parents', '--template', 'got_revision:{rev}:{node}'] @@ -300,6 +326,15 @@ workdir=workdir, logEnviron=False)) +def update_git(platform, factory, repourl, workdir, use_branch, + force_branch=None): + factory.addStep( + Git( + repourl=repourl, + mode='full', + method='fresh', + workdir=workdir, + logEnviron=False)) def setup_steps(platform, factory, workdir=None, repourl='https://bitbucket.org/pypy/pypy/', @@ -607,20 +642,6 @@ locks=[lock.access('counting')], ) ) - if host == 'tannit': - pypy_c_rel = 'build/pypy/goal/pypy-c' - self.addStep(ShellCmd( - env={'PYTHONPATH': './benchmarks/lib/jinja2'}, - description="measure numpy compatibility", - command=[pypy_c_rel, - 'build/pypy/module/micronumpy/tool/numready/', - pypy_c_rel, 'numpy-compat.html'], - workdir=".")) - resfile = os.path.expanduser("~/numpy_compat/%(got_revision)s.html") - self.addStep(NumpyStatusUpload( - slavesrc="numpy-compat.html", - masterdest=WithProperties(resfile), - workdir=".")) pypy_c_rel = "../build/pypy/goal/pypy-c" self.addStep(ShellCmd( # this step needs exclusive access to the CPU @@ -758,3 +779,93 @@ "--resultlog=testrun.log", ], logfiles={'pytestLog': 'testrun.log'})) + + +class NativeNumpyTests(factory.BuildFactory): + ''' + Download a pypy nightly, install nose and numpy, and run the numpy test suite + ''' + def __init__(self, platform='linux', + app_tests=False, + host = 'tannit', + lib_python=False, + pypyjit=True, + prefix=None, + translationArgs=[] + ): + factory.BuildFactory.__init__(self) + + self.addStep(ParseRevision(hideStepIf=ParseRevision.hideStepIf, + doStepIf=ParseRevision.doStepIf)) + # download corresponding nightly build + self.addStep(ShellCmd( + description="Clear pypy-c", + command=['rm', '-rf', 
'pypy-c'], + workdir='.')) + extension = get_extension(platform) + name = build_name(platform, pypyjit, translationArgs, placeholder='%(final_file_name)s') + extension + self.addStep(PyPyDownload( + basename=name, + mastersrc='~/nightly', + slavedest='pypy_build' + extension, + workdir='pypy-c')) + + # extract downloaded file + if platform.startswith('win'): + raise NotImplementedError + else: + self.addStep(ShellCmd( + description="decompress pypy-c", + command=['tar', '--extract', '--file=pypy_build'+ extension, '--strip-components=1', '--directory=.'], + workdir='pypy-c', + haltOnFailure=True, + )) + + # virtualenv the download + self.addStep(ShellCmd( + description="create virtualenv", + command=['virtualenv','-p', 'pypy-c/bin/pypy', 'install'], + workdir='./', + haltOnFailure=True, + )) + + self.addStep(ShellCmd( + description="install nose", + command=['install/bin/pip', 'install','nose'], + workdir='./', + haltOnFailure=True, + )) + + # obtain a pypy-compatible branch of numpy + numpy_url = 'https://www.bitbucket.org/pypy/numpy' + numpy_pypy_branch = 'pypy-compat' + update_git(platform, self, numpy_url, 'numpy_src', use_branch=True, + force_branch=numpy_pypy_branch) + + self.addStep(ShellCmd( + description="install numpy", + command=['../install/bin/python', 'setup.py','install'], + workdir='numpy_src')) + + self.addStep(ShellCmd( + description="test numpy", + command=['bin/nosetests', 'site-packages/numpy', + ], + #logfiles={'pytestLog': 'pytest-numpy.log'}, + timeout=4000, + workdir='install', + #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? 
+ )) + if host == 'tannit': + pypy_c_rel = 'install/bin/python' + self.addStep(ShellCmd( + description="measure numpy compatibility", + command=[pypy_c_rel, + 'numpy_src/tools/numready/', + pypy_c_rel, 'numpy-compat.html'], + workdir=".")) + resfile = os.path.expanduser("~/numpy_compat/%(got_revision)s.html") + self.addStep(NumpyStatusUpload( + slavesrc="numpy-compat.html", + masterdest=WithProperties(resfile), + workdir=".")) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -148,6 +148,8 @@ pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', postfix='-64') +pypyNumpyCompatability = pypybuilds.NativeNumpyTests(platform='linux64') + # LINUX32 = "own-linux-x86-32" @@ -180,7 +182,7 @@ JITBENCH64 = "jit-benchmark-linux-x86-64" JITBENCH64_2 = 'jit-benchmark-linux-x86-64-2' CPYTHON_64 = "cpython-2-benchmark-x86-64" - +NUMPY_64 = "numpy-compatability-linux-x86-64" # buildbot builder PYPYBUILDBOT = 'pypy-buildbot' @@ -264,6 +266,7 @@ JITONLYLINUXPPC64, JITBENCH, JITBENCH64, + NUMPY_64, ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, @@ -433,6 +436,13 @@ 'factory': pypyOwnTestFactoryIndiana, 'category': 'openindiana32', }, + {'name': NUMPY_64, + 'slavenames': ["tannit64"], + 'builddir': NUMPY_64, + 'factory': pypyNumpyCompatability, + 'category': 'numpy', + 'locks': [TannitCPU.access('counting')], + }, {'name': PYPYBUILDBOT, 'slavenames': ['cobra'], 'builddir': PYPYBUILDBOT, From noreply at buildbot.pypy.org Mon Nov 11 10:46:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 10:46:28 +0100 (CET) Subject: [pypy-commit] pypy default: PY_SSIZE_T_CLEAN support, second and hopefully final part Message-ID: <20131111094628.DA3941C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67949:0611a3ab0561 Date: 2013-11-11 10:45 +0100 http://bitbucket.org/pypy/pypy/changeset/0611a3ab0561/ Log: PY_SSIZE_T_CLEAN 
support, second and hopefully final part diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -386,6 +386,9 @@ 'PyString_FromFormat', 'PyString_FromFormatV', 'PyModule_AddObject', 'PyModule_AddIntConstant', 'PyModule_AddStringConstant', 'Py_BuildValue', 'Py_VaBuildValue', 'PyTuple_Pack', + '_PyArg_Parse_SizeT', '_PyArg_ParseTuple_SizeT', + '_PyArg_ParseTupleAndKeywords_SizeT', '_PyArg_VaParse_SizeT', + '_PyArg_VaParseTupleAndKeywords_SizeT', '_Py_BuildValue_SizeT', '_Py_VaBuildValue_SizeT', 'PyErr_Format', 'PyErr_NewException', 'PyErr_NewExceptionWithDoc', diff --git a/pypy/module/cpyext/include/eval.h b/pypy/module/cpyext/include/eval.h --- a/pypy/module/cpyext/include/eval.h +++ b/pypy/module/cpyext/include/eval.h @@ -10,8 +10,10 @@ #include "Python.h" #ifdef PY_SSIZE_T_CLEAN -#define PyPyObject_CallFunction _PyPyObject_CallFunction_SizeT -#define PyPyObject_CallMethod _PyPyObject_CallMethod_SizeT +#undef PyObject_CallFunction +#undef PyObject_CallMethod +#define PyObject_CallFunction _PyObject_CallFunction_SizeT +#define PyObject_CallMethod _PyObject_CallMethod_SizeT #endif #define PyEval_CallObject(func,arg) \ diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -10,9 +10,20 @@ /* If PY_SSIZE_T_CLEAN is defined, each functions treats #-specifier to mean Py_ssize_t */ #ifdef PY_SSIZE_T_CLEAN -#define PyPy_BuildValue _PyPy_BuildValue_SizeT -#define PyPy_VaBuildValue _PyPy_VaBuildValue_SizeT - /*XXX more*/ +#undef PyArg_Parse +#undef PyArg_ParseTuple +#undef PyArg_ParseTupleAndKeywords +#undef PyArg_VaParse +#undef PyArg_VaParseTupleAndKeywords +#undef Py_BuildValue +#undef Py_VaBuildValue +#define PyArg_Parse _PyArg_Parse_SizeT +#define PyArg_ParseTuple _PyArg_ParseTuple_SizeT +#define PyArg_ParseTupleAndKeywords _PyArg_ParseTupleAndKeywords_SizeT 
+#define PyArg_VaParse _PyArg_VaParse_SizeT +#define PyArg_VaParseTupleAndKeywords _PyArg_VaParseTupleAndKeywords_SizeT +#define Py_BuildValue _Py_BuildValue_SizeT +#define Py_VaBuildValue _Py_VaBuildValue_SizeT #endif #define PYTHON_API_VERSION 1013 @@ -26,6 +37,15 @@ const char *, char **, ...); int PyArg_VaParseTupleAndKeywords(PyObject *, PyObject *, const char *, char **, va_list); + +int _PyArg_Parse_SizeT(PyObject *, const char *, ...); +int _PyArg_ParseTuple_SizeT(PyObject *, const char *, ...); +int _PyArg_VaParse_SizeT(PyObject *, const char *, va_list); + +int _PyArg_ParseTupleAndKeywords_SizeT(PyObject *, PyObject *, + const char *, char **, ...); +int _PyArg_VaParseTupleAndKeywords_SizeT(PyObject *, PyObject *, + const char *, char **, va_list); /* to make sure that modules compiled with CPython's or PyPy's Python.h are not importable on the other interpreter, use a #define to expect a diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -53,7 +53,7 @@ } int -_PyArg_Parse_SizeT(PyObject *args, char *format, ...) +_PyArg_Parse_SizeT(PyObject *args, const char *format, ...) { int retval; va_list va; @@ -78,7 +78,7 @@ } int -_PyArg_ParseTuple_SizeT(PyObject *args, char *format, ...) +_PyArg_ParseTuple_SizeT(PyObject *args, const char *format, ...) 
{ int retval; va_list va; @@ -109,7 +109,7 @@ } int -_PyArg_VaParse_SizeT(PyObject *args, char *format, va_list va) +_PyArg_VaParse_SizeT(PyObject *args, const char *format, va_list va) { va_list lva; diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -3,9 +3,11 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class AppTestGetargs(AppTestCpythonExtensionBase): - def w_import_parser(self, implementation, argstyle='METH_VARARGS'): + def w_import_parser(self, implementation, argstyle='METH_VARARGS', + PY_SSIZE_T_CLEAN=False): mod = self.import_extension( - 'modname', [('funcname', argstyle, implementation)]) + 'modname', [('funcname', argstyle, implementation)], + PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) return mod.funcname def test_pyarg_parse_int(self): @@ -179,3 +181,34 @@ ''') raises(TypeError, "charbuf(10)") assert 'foo\0bar\0baz' == charbuf('foo\0bar\0baz') + + def test_pyarg_parse_without_py_ssize_t(self): + import sys + charbuf = self.import_parser( + ''' + char *buf; + Py_ssize_t y = -1; + if (!PyArg_ParseTuple(args, "s#", &buf, &y)) { + return NULL; + } + return PyInt_FromSsize_t(y); + ''') + if sys.maxsize < 2**32: + expected = 5 + elif sys.byteorder == 'little': + expected = -0xfffffffb + else: + expected = 0x5ffffffff + assert charbuf('12345') == expected + + def test_pyarg_parse_with_py_ssize_t(self): + charbuf = self.import_parser( + ''' + char *buf; + Py_ssize_t y = -1; + if (!PyArg_ParseTuple(args, "s#", &buf, &y)) { + return NULL; + } + return PyInt_FromSsize_t(y); + ''', PY_SSIZE_T_CLEAN=True) + assert charbuf('12345') == 5 From noreply at buildbot.pypy.org Mon Nov 11 10:46:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 10:46:27 +0100 (CET) Subject: [pypy-commit] pypy default: Support PY_SSIZE_T_CLEAN, part 1 Message-ID: 
<20131111094627.97BA01C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67948:b41903344f4c Date: 2013-11-11 10:03 +0100 http://bitbucket.org/pypy/pypy/changeset/b41903344f4c/ Log: Support PY_SSIZE_T_CLEAN, part 1 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -386,12 +386,14 @@ 'PyString_FromFormat', 'PyString_FromFormatV', 'PyModule_AddObject', 'PyModule_AddIntConstant', 'PyModule_AddStringConstant', 'Py_BuildValue', 'Py_VaBuildValue', 'PyTuple_Pack', + '_Py_BuildValue_SizeT', '_Py_VaBuildValue_SizeT', 'PyErr_Format', 'PyErr_NewException', 'PyErr_NewExceptionWithDoc', 'PySys_WriteStdout', 'PySys_WriteStderr', 'PyEval_CallFunction', 'PyEval_CallMethod', 'PyObject_CallFunction', 'PyObject_CallMethod', 'PyObject_CallFunctionObjArgs', 'PyObject_CallMethodObjArgs', + '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', diff --git a/pypy/module/cpyext/include/eval.h b/pypy/module/cpyext/include/eval.h --- a/pypy/module/cpyext/include/eval.h +++ b/pypy/module/cpyext/include/eval.h @@ -9,6 +9,11 @@ #include "Python.h" +#ifdef PY_SSIZE_T_CLEAN +#define PyPyObject_CallFunction _PyPyObject_CallFunction_SizeT +#define PyPyObject_CallMethod _PyPyObject_CallMethod_SizeT +#endif + #define PyEval_CallObject(func,arg) \ PyEval_CallObjectWithKeywords(func, arg, (PyObject *)NULL) @@ -16,6 +21,8 @@ PyObject * PyEval_CallMethod(PyObject *obj, const char *name, const char *format, ...); PyObject * PyObject_CallFunction(PyObject *obj, const char *format, ...); PyObject * PyObject_CallMethod(PyObject *obj, const char *name, const char *format, ...); +PyObject * _PyObject_CallFunction_SizeT(PyObject *obj, const char *format, ...); +PyObject * _PyObject_CallMethod_SizeT(PyObject *obj, const char *name, const 
char *format, ...); PyObject * PyObject_CallFunctionObjArgs(PyObject *callable, ...); PyObject * PyObject_CallMethodObjArgs(PyObject *callable, PyObject *name, ...); diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -7,6 +7,14 @@ extern "C" { #endif +/* If PY_SSIZE_T_CLEAN is defined, each functions treats #-specifier + to mean Py_ssize_t */ +#ifdef PY_SSIZE_T_CLEAN +#define PyPy_BuildValue _PyPy_BuildValue_SizeT +#define PyPy_VaBuildValue _PyPy_VaBuildValue_SizeT + /*XXX more*/ +#endif + #define PYTHON_API_VERSION 1013 #define PYTHON_API_STRING "1013" diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c --- a/pypy/module/cpyext/src/abstract.c +++ b/pypy/module/cpyext/src/abstract.c @@ -150,6 +150,26 @@ } PyObject * +_PyObject_CallFunction_SizeT(PyObject *callable, const char *format, ...) +{ + va_list va; + PyObject *args; + + if (callable == NULL) + return null_error(); + + if (format && *format) { + va_start(va, format); + args = _Py_VaBuildValue_SizeT(format, va); + va_end(va); + } + else + args = PyTuple_New(0); + + return call_function_tail(callable, args); +} + +PyObject * PyObject_CallMethod(PyObject *o, const char *name, const char *format, ...) { va_list va; @@ -188,6 +208,45 @@ return retval; } +PyObject * +_PyObject_CallMethod_SizeT(PyObject *o, const char *name, const char *format, ...) 
+{ + va_list va; + PyObject *args; + PyObject *func = NULL; + PyObject *retval = NULL; + + if (o == NULL || name == NULL) + return null_error(); + + func = PyObject_GetAttrString(o, name); + if (func == NULL) { + PyErr_SetString(PyExc_AttributeError, name); + return 0; + } + + if (!PyCallable_Check(func)) { + type_error("attribute of type '%.200s' is not callable", func); + goto exit; + } + + if (format && *format) { + va_start(va, format); + args = _Py_VaBuildValue_SizeT(format, va); + va_end(va); + } + else + args = PyTuple_New(0); + + retval = call_function_tail(func, args); + + exit: + /* args gets consumed in call_function_tail */ + Py_XDECREF(func); + + return retval; +} + static PyObject * objargs_mktuple(va_list va) { diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -229,9 +229,11 @@ return space.wrap(pydname) @unwrap_spec(name=str, init='str_or_None', body=str, - load_it=bool, filename='str_or_None') + load_it=bool, filename='str_or_None', + PY_SSIZE_T_CLEAN=bool) def import_module(space, name, init=None, body='', - load_it=True, filename=None): + load_it=True, filename=None, + PY_SSIZE_T_CLEAN=False): """ init specifies the overall template of the module. 
@@ -243,15 +245,19 @@ """ if init is not None: code = """ + %(PY_SSIZE_T_CLEAN)s #include %(body)s void init%(name)s(void) { %(init)s } - """ % dict(name=name, init=init, body=body) + """ % dict(name=name, init=init, body=body, + PY_SSIZE_T_CLEAN='#define PY_SSIZE_T_CLEAN' + if PY_SSIZE_T_CLEAN else '') kwds = dict(separate_module_sources=[code]) else: + assert not PY_SSIZE_T_CLEAN if filename is None: filename = name filename = py.path.local(pypydir) / 'module' \ @@ -276,8 +282,9 @@ space.sys.get('modules'), space.wrap(name)) - @unwrap_spec(modname=str, prologue=str) - def import_extension(space, modname, w_functions, prologue=""): + @unwrap_spec(modname=str, prologue=str, PY_SSIZE_T_CLEAN=bool) + def import_extension(space, modname, w_functions, prologue="", + PY_SSIZE_T_CLEAN=False): functions = space.unwrap(w_functions) methods_table = [] codes = [] @@ -300,7 +307,8 @@ }; """ % ('\n'.join(methods_table),) init = """Py_InitModule("%s", methods);""" % (modname,) - return import_module(space, name=modname, init=init, body=body) + return import_module(space, name=modname, init=init, body=body, + PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) @unwrap_spec(name=str) def record_imported_module(name): diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -212,7 +212,7 @@ ("call_func", "METH_VARARGS", """ return PyObject_CallFunction(PyTuple_GetItem(args, 0), - "siO", "text", 42, Py_None); + "siiiiO", "text", 42, -41, 40, -39, Py_None); """), ("call_method", "METH_VARARGS", """ @@ -222,9 +222,28 @@ ]) def f(*args): return args - assert module.call_func(f) == ("text", 42, None) + assert module.call_func(f) == ("text", 42, -41, 40, -39, None) assert module.call_method("text") == 2 + def test_CallFunction_PY_SSIZE_T_CLEAN(self): + module = self.import_extension('foo', [ + ("call_func", "METH_VARARGS", + """ + return PyObject_CallFunction(PyTuple_GetItem(args, 0), 
+ "s#s#", "text", (Py_ssize_t)3, "othertext", (Py_ssize_t)6); + """), + ("call_method", "METH_VARARGS", + """ + return PyObject_CallMethod(PyTuple_GetItem(args, 0), + "find", "s#", "substring", (Py_ssize_t)6); + """), + ], PY_SSIZE_T_CLEAN=True) + def f(*args): + return args + assert module.call_func(f) == ("tex", "othert") + assert module.call_method("<>") == -1 + assert module.call_method("<>") == 2 + def test_CallFunctionObjArgs(self): module = self.import_extension('foo', [ ("call_func", "METH_VARARGS", From noreply at buildbot.pypy.org Mon Nov 11 10:56:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 10:56:47 +0100 (CET) Subject: [pypy-commit] pypy default: Import cffi/861bff9ef031 Message-ID: <20131111095647.04D571C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67950:3c192b92f54e Date: 2013-11-11 10:56 +0100 http://bitbucket.org/pypy/pypy/changeset/3c192b92f54e/ Log: Import cffi/861bff9ef031 diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. 
This will perform # the final adjustments, like copying the Python->C wrapper @@ -646,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -743,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -866,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, 
&_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -422,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1596,6 +1596,19 @@ ## assert ffi.sizeof("enum foo_e") == expected_size ## assert int(ffi.cast("enum foo_e", -1)) == expected_minus1 +def test_enum_bug118(): + maxulong = 256 ** FFI().sizeof("unsigned long") - 1 + for c1, c2, c2c in [(0xffffffff, -1, ''), + (maxulong, -1, ''), + (-1, 0xffffffff, 'U'), + (-1, maxulong, 'UL')]: + ffi = FFI() + ffi.cdef("enum foo_e { AA=%s };" % c1) + e = py.test.raises(VerificationError, 
ffi.verify, + "enum foo_e { AA=%s%s };" % (c2, c2c)) + assert str(e.value) == ('enum foo_e: AA has the real value %d, not %d' + % (c2, c1)) + def test_string_to_voidp_arg(): ffi = FFI() ffi.cdef("int myfunc(void *);") From noreply at buildbot.pypy.org Mon Nov 11 11:23:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 11:23:26 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Make the release branch. Message-ID: <20131111102326.45CBC1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67951:f18b1bc6357c Date: 2013-11-11 11:22 +0100 http://bitbucket.org/pypy/pypy/changeset/f18b1bc6357c/ Log: Make the release branch. From noreply at buildbot.pypy.org Mon Nov 11 16:15:24 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 11 Nov 2013 16:15:24 +0100 (CET) Subject: [pypy-commit] buildbot default: make sure we install jinja2 to generate the NumPyPy Status page Message-ID: <20131111151524.B6CC41C00B3@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r887:f62e6adee795 Date: 2013-11-11 16:14 +0100 http://bitbucket.org/pypy/buildbot/changeset/f62e6adee795/ Log: make sure we install jinja2 to generate the NumPyPy Status page diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -857,6 +857,11 @@ #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? 
)) if host == 'tannit': + self.addStep(ShellCmd( + description="install jinja2", + command=['install/bin/pip', 'install', 'jinja2'], + workdir='./', + haltOnFailure=True,)) pypy_c_rel = 'install/bin/python' self.addStep(ShellCmd( description="measure numpy compatibility", From noreply at buildbot.pypy.org Mon Nov 11 19:15:28 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 11 Nov 2013 19:15:28 +0100 (CET) Subject: [pypy-commit] pypy default: fix this test on win32 (tzset is unix-only) Message-ID: <20131111181528.DDE341C01F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67952:b87ac0ef8a37 Date: 2013-11-11 13:13 -0500 http://bitbucket.org/pypy/pypy/changeset/b87ac0ef8a37/ Log: fix this test on win32 (tzset is unix-only) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -573,10 +573,10 @@ def test_reimport_builtin_simple_case_1(self): import sys, time - del time.tzset + del time.clock del sys.modules['time'] import time - assert hasattr(time, 'tzset') + assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): skip("fix me") From noreply at buildbot.pypy.org Mon Nov 11 19:34:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 19:34:04 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Update the version numbers Message-ID: <20131111183404.6F15C1C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67953:d90f34a00b48 Date: 2013-11-11 19:32 +0100 http://bitbucket.org/pypy/pypy/changeset/d90f34a00b48/ Log: Update the version numbers diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.2.0-alpha0" +#define 
PYPY_VERSION "2.2.0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 0, "final", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Mon Nov 11 19:34:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 19:34:05 +0100 (CET) Subject: [pypy-commit] pypy default: Update the version numbers past the release Message-ID: <20131111183405.7EE331C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67954:b872664eef96 Date: 2013-11-11 19:33 +0100 http://bitbucket.org/pypy/pypy/changeset/b872664eef96/ Log: Update the version numbers past the release diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.2.0-alpha0" +#define PYPY_VERSION "2.2.1-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 1, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Mon Nov 11 19:54:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 19:54:32 +0100 (CET) Subject: [pypy-commit] pypy default: Update the contributor list Message-ID: <20131111185432.07EBF1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67955:4cd1bc8b3111 Date: 2013-11-11 19:53 +0100 http://bitbucket.org/pypy/pypy/changeset/4cd1bc8b3111/ Log: Update the contributor list diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -17,24 +17,24 @@ David Schneider Holger Krekel Christian Tismer + Matti Picus Hakan Ardo Benjamin Peterson - Matti Picus Philip Jenvey Anders Chrigstrom Brian Kearns + Manuel Jacob Eric van Riet Paap + Wim Lavrijsen Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob + Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest @@ -45,8 +45,10 @@ David Edelsohn Anders Hammarquist Jakub Gustak + Romain Guillebert Guido Wesdorp Lawrence Oluyede + Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis @@ -54,18 +56,17 @@ Ludovic Aubry Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola 
Jean-Paul Calderone @@ -74,29 +75,33 @@ Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. Muller + Laurence Tratt + Rami Chowdhury David Malcolm Eugene Oden Henry Mason @@ -105,14 +110,15 @@ David Ripton Dusty Phillips Lukas Renggli + Edd Barrett Guenter Jantzen Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu @@ -128,10 +134,13 @@ Olivier Dormond Jared Grubb Karl Bartel + Tobias Pape Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles @@ -141,7 +150,6 @@ Neil Shepperd Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -153,19 +161,15 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain Christian Tismer Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski Andrew Dalke Sylvain Thenault Nathan Taylor @@ -189,13 +193,15 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -209,7 +215,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński Christian Muirhead James Lan shoma hosaka @@ -219,6 +225,7 @@ Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + w31rd0 Jim Baker Rodrigo Araújo Armin Ronacher @@ -234,12 +241,12 @@ Even Wiik Thomassen jbs soareschen + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -1,3 +1,5 @@ +# NOTE: run this script with LANG=en_US.UTF-8 + import py import sys from collections import defaultdict @@ -132,7 +134,7 @@ if show_numbers: print '%5d %s' % (n, name) else: - print name + print ' ' + name if __name__ == '__main__': show_numbers = '-n' in sys.argv From noreply at buildbot.pypy.org Mon Nov 11 20:16:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 20:16:52 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Updates to the doc Message-ID: <20131111191652.2D6451C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67956:d89118ea1757 Date: 2013-11-11 20:15 +0100 http://bitbucket.org/pypy/pypy/changeset/d89118ea1757/ Log: Updates to the doc diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -33,7 +33,7 @@ $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.1' +version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.1.0' +release = '2.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -49,5 +49,5 @@ python-announce, python-dev ... * add a tag on jitviewer that corresponds to pypy release -* add a tag on codespeed that corresponds to pypy release +* add a tag on codespeed that corresponds to pypy release (XXX where??) diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.1.0`_: the latest official release +* `Release 2.2.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.1.0`: http://pypy.org/download.html +.. _`Release 2.2.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.0.rst @@ -0,0 +1,9 @@ +======================================= +PyPy 2.2 - xxx +======================================= + +GC! 
+ + +numpypy module was removed in favor of an external numpy fork +at https://bitbucket.org/pypy/numpy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.2.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.2.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-2.2.rst @@ -1,5 +1,5 @@ ====================== -What's new in PyPy 2.1 +What's new in PyPy 2.2 ====================== .. this is a revision shortly after release-2.1-beta diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,140 +1,7 @@ -====================== -What's new in PyPy 2.1 -====================== +======================= +What's new in PyPy 2.2+ +======================= -.. this is a revision shortly after release-2.1-beta -.. startrev: 4eb52818e7c0 +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 -.. branch: sanitise_bytecode_dispatch -Make PyPy's bytecode dispatcher easy to read, and less reliant on RPython -magic. There is no functional change, though the removal of dead code leads -to many fewer tests to execute. - -.. branch: fastjson -Fast json decoder written in RPython, about 3-4x faster than the pure Python -decoder which comes with the stdlib - -.. branch: improve-str2charp -Improve the performance of I/O writing up to 15% by using memcpy instead of -copying char-by-char in str2charp and get_nonmovingbuffer - -.. branch: flowoperators -Simplify rpython/flowspace/ code by using more metaprogramming. Create -SpaceOperator class to gather static information about flow graph operations. - -.. branch: package-tk -Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch -to optionally skip it. - -.. branch: distutils-cppldflags -Copy CPython's implementation of customize_compiler, dont call split on -environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. - -.. 
branch: precise-instantiate -When an RPython class is instantiated via an indirect call (that is, which -class is being instantiated isn't known precisely) allow the optimizer to have -more precise information about which functions can be called. Needed for Topaz. - -.. branch: ssl_moving_write_buffer - -.. branch: pythoninspect-fix -Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process -to start interactive prompt when the script execution finishes. This adds -new __pypy__.os.real_getenv call that bypasses Python cache and looksup env -in the underlying OS. Translatorshell now works on PyPy. - -.. branch: add-statvfs -Added os.statvfs and os.fstatvfs - -.. branch: statvfs_tests -Added some addition tests for statvfs. - -.. branch: ndarray-subtype -Allow subclassing ndarray, i.e. matrix - -.. branch: ndarray-sort -Implement ndarray in-place sorting (for numeric types, no non-native byte order) - -.. branch: pypy-pyarray -Implement much of numpy's c api in cpyext, allows (slow) access to ndarray -from c - -.. branch: kill-ootype - -.. branch: fast-slowpath -Added an abstraction for functions with a fast and slow path in the JIT. This -speeds up list.append() and list.pop(). - -.. branch: curses_fixes - -.. branch: foldable-getarrayitem-indexerror -Constant-fold reading out of constant tuples in PyPy. - -.. branch: mro-reorder-numpypy-str -No longer delegate numpy string_ methods to space.StringObject, in numpy -this works by kind of by accident. Support for merging the refactor-str-types -branch - -.. branch: kill-typesystem -Remove the "type system" abstraction, now that there is only ever one kind of -type system used. - -.. branch: kill-gen-store-back-in -Kills gen_store_back_in_virtualizable - should improve non-inlined calls by -a bit - -.. branch: dotviewer-linewidth -.. branch: reflex-support -.. branch: numpypy-inplace-op -.. branch: rewritten-loop-logging -.. branch: no-release-gil -.. branch: safe-win-mmap -.. 
branch: boolean-indexing-cleanup -.. branch: cpyext-best_base -.. branch: cpyext-int -.. branch: fileops2 - -.. branch: nobold-backtrace -Work on improving UnionError messages and stack trace displays. - -.. branch: improve-errors-again -More improvements and refactorings of error messages. - -.. branch: improve-errors-again2 -Unbreak tests in rlib. - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace. - -.. branch: file-support-in-rpython -make open() and friends rpython - -.. branch: incremental-gc -Added the new incminimark GC which performs GC in incremental steps - -.. branch: fast_cffi_list_init -fastpath for cffi.new("long[]") - -.. branch: remove-eval-frame -remove a pointless abstraction - -.. branch: jit-settrace -Allow the jit to continue running when sys.settrace() is active, necessary to -make coverage.py fast - -.. branch: remove-numpypy -Remove lib_pypy/numpypy in favor of external numpy fork - -.. branch: jit-counter -Tweak the jit counters: decay them at minor collection (actually -only every 32 minor collection is enough). Should avoid the "memory -leaks" observed in long-running processes, actually created by the -jit compiling more and more rarely executed paths. - -.. branch: fix-trace-jit -Fixed the usage of sys.settrace() with the JIT. Also made it so using -sys.settrace() doesn't cause the GIL to be released on every single iteration. - -.. 
branch: rordereddict -Implement OrderedDict in RPython From noreply at buildbot.pypy.org Mon Nov 11 20:17:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 20:17:12 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Update the contributor list Message-ID: <20131111191712.A5C2D1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67957:154ce6d69246 Date: 2013-11-11 19:53 +0100 http://bitbucket.org/pypy/pypy/changeset/154ce6d69246/ Log: Update the contributor list diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -17,24 +17,24 @@ David Schneider Holger Krekel Christian Tismer + Matti Picus Hakan Ardo Benjamin Peterson - Matti Picus Philip Jenvey Anders Chrigstrom Brian Kearns + Manuel Jacob Eric van Riet Paap + Wim Lavrijsen Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob + Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest @@ -45,8 +45,10 @@ David Edelsohn Anders Hammarquist Jakub Gustak + Romain Guillebert Guido Wesdorp Lawrence Oluyede + Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis @@ -54,18 +56,17 @@ Ludovic Aubry Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone @@ -74,29 +75,33 @@ Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. Muller + Laurence Tratt + Rami Chowdhury David Malcolm Eugene Oden Henry Mason @@ -105,14 +110,15 @@ David Ripton Dusty Phillips Lukas Renggli + Edd Barrett Guenter Jantzen Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu @@ -128,10 +134,13 @@ Olivier Dormond Jared Grubb Karl Bartel + Tobias Pape Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles @@ -141,7 +150,6 @@ Neil Shepperd Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -153,19 +161,15 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain Christian Tismer Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski Andrew Dalke Sylvain Thenault Nathan Taylor @@ -189,13 +193,15 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -209,7 +215,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński Christian Muirhead James Lan shoma hosaka @@ -219,6 +225,7 @@ Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + w31rd0 Jim Baker Rodrigo Araújo Armin Ronacher @@ -234,12 +241,12 @@ Even Wiik Thomassen jbs soareschen + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -1,3 +1,5 @@ +# NOTE: run this script with LANG=en_US.UTF-8 + import py import sys from collections import defaultdict @@ -132,7 +134,7 @@ if show_numbers: print '%5d %s' % (n, name) else: - print name + print ' ' + name if __name__ == '__main__': show_numbers = '-n' in sys.argv From noreply at buildbot.pypy.org Mon Nov 11 20:19:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Nov 2013 20:19:17 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Precision from irc Message-ID: <20131111191917.686A81C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67958:be2e961e769d Date: 2013-11-11 20:18 +0100 http://bitbucket.org/pypy/pypy/changeset/be2e961e769d/ Log: Precision from irc diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -48,6 +48,6 @@ * send announcements to pypy-dev, python-list, python-announce, python-dev ... -* add a tag on jitviewer that corresponds to pypy release -* add a tag on codespeed that corresponds to pypy release (XXX where??) 
+* add a tag on the pypy/jitviewer repo that corresponds to pypy release +* add a tag on the codespeed web site that corresponds to pypy release From noreply at buildbot.pypy.org Mon Nov 11 21:40:01 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 11 Nov 2013 21:40:01 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: remove testing columns if empty and add numpy-status pages to header Message-ID: <20131111204001.A85EC1C00B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r888:a970180c8b91 Date: 2013-11-11 22:38 +0200 http://bitbucket.org/pypy/buildbot/changeset/a970180c8b91/ Log: remove testing columns if empty and add numpy-status pages to header diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -152,9 +152,6 @@ self.contentEncodings, self.defaultType) -class NumpyStatusList(File): - pass - class PyPyDirectoryLister(DirectoryLister): '''template based, uses master/templates/directory.html ''' @@ -233,3 +230,6 @@ else: return rowClass + '-failed' +class NumpyStatusList(PyPyList): + pass + diff --git a/master/templates/directory.html b/master/templates/directory.html --- a/master/templates/directory.html +++ b/master/templates/directory.html @@ -39,6 +39,9 @@ {% set row_class = cycler('odd', 'even') %} +{% set has_tests = files|join('', attribute='own_summary')|length > 0 or + files|join('', attribute='app_summary')|length > 0 %} +
Filename Size
{{ d.text }} {{ d.size}}
{% if files|length > 0 %} @@ -46,16 +49,20 @@ +{% if has_tests %} +{% endif %} {% else %} +{% if has_tests %} +{% endif %} {% endif %} @@ -64,8 +71,10 @@ +{% if has_tests %} +{% endif %} {% endfor %} @@ -74,8 +83,10 @@ +{% if has_tests %} +{% endif %} {% endfor %}
Filename Size Dateown tests applevel tests
Directory Size Date
{{ d.text }} {{ d.size}} {{ d.date}}
{{ f.text }} {{ f.size }} {{ f.date }}{{ f.own_summary }} {{ f.app_summary }}
diff --git a/master/templates/layout.html b/master/templates/layout.html --- a/master/templates/layout.html +++ b/master/templates/layout.html @@ -23,19 +23,19 @@ {% block header -%}
Home - - - Speed - Summary (trunk) - Summary - Nightly builds + - Speed + - Numpy compatability + - Summary (trunk) + - Summary + - Nightly builds - Waterfall + - Waterfall - Builders + - Builders From noreply at buildbot.pypy.org Mon Nov 11 21:59:13 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Mon, 11 Nov 2013 21:59:13 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Remove pointless print statements. Message-ID: <20131111205913.719DE1C0144@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67959:3a4f3f694fe9 Date: 2013-11-11 15:29 +0000 http://bitbucket.org/pypy/pypy/changeset/3a4f3f694fe9/ Log: Remove pointless print statements. Presumably these are long forgotten debugging aids. diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1187,7 +1187,6 @@ skip("not reliable on top of Boehm") class A(object): def __del__(self): - print 'del' del lst[:] for i in range(10): keepalive = [] @@ -1257,7 +1256,6 @@ (dict, []), (dict, [(5,6)]), (dict, [('x',7)]), (dict, [(X,8)]), (dict, [(u'x', 7)]), ]: - print base, arg class SubClass(base): def __iter__(self): return iter("foobar") From noreply at buildbot.pypy.org Mon Nov 11 21:59:16 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Mon, 11 Nov 2013 21:59:16 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Merge default. Message-ID: <20131111205916.89DC71C0144@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r67960:21ecaa0104be Date: 2013-11-11 20:47 +0000 http://bitbucket.org/pypy/pypy/changeset/21ecaa0104be/ Log: Merge default. 
diff too long, truncating to 2000 out of 4612 lines diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) 
self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! 
+ key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -322,7 +349,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +370,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. """ - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +402,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +434,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + 
with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +480,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, 
pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, 
pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' # raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print 
sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . 
import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if 
bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . 
import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. 
This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def 
_generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + 
self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
- ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. 
kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, 
model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = 
module.load_function(BFunc, funcname) value = function() return value @@ -413,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -431,10 +451,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +486,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: @@ -476,6 +505,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # 
int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +530,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. 
It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -19,11 +19,13 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -import types +import types, re from pyrepl import unicodedata_ from pyrepl import commands from pyrepl import input +_r_csi_seq = re.compile(r"\033\[[ -@]*[A-~]") + def _make_unctrl_map(): uc_map = {} for c in map(unichr, range(256)): @@ -309,6 +311,10 @@ excluded from the length calculation. So also a copy of the prompt is returned with these control characters removed. """ + # The logic below also ignores the length of common escape + # sequences if they were not explicitly within \x01...\x02. + # They are CSI (or ANSI) sequences ( ESC [ ... LETTER ) + out_prompt = '' l = len(prompt) pos = 0 @@ -321,9 +327,13 @@ break # Found start and end brackets, subtract from string length l = l - (e-s+1) - out_prompt += prompt[pos:s] + prompt[s+1:e] + keep = prompt[pos:s] + l -= sum(map(len, _r_csi_seq.findall(keep))) + out_prompt += keep + prompt[s+1:e] pos = e+1 - out_prompt += prompt[pos:] + keep = prompt[pos:] + l -= sum(map(len, _r_csi_seq.findall(keep))) + out_prompt += keep return out_prompt, l def bow(self, p=None): diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -17,24 +17,24 @@ David Schneider Holger Krekel Christian Tismer + Matti Picus Hakan Ardo Benjamin Peterson - Matti Picus Philip Jenvey Anders Chrigstrom Brian Kearns + Manuel Jacob Eric van Riet Paap + Wim Lavrijsen Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob + Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus 
Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest @@ -45,8 +45,10 @@ David Edelsohn Anders Hammarquist Jakub Gustak + Romain Guillebert Guido Wesdorp Lawrence Oluyede + Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis @@ -54,18 +56,17 @@ Ludovic Aubry Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone @@ -74,29 +75,33 @@ Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Laurence Tratt + Rami Chowdhury David Malcolm Eugene Oden Henry Mason @@ -105,14 +110,15 @@ David Ripton Dusty Phillips Lukas Renggli + Edd Barrett Guenter Jantzen Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu @@ -128,10 +134,13 @@ Olivier Dormond Jared Grubb Karl Bartel + Tobias Pape Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles @@ -141,7 +150,6 @@ Neil Shepperd Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -153,19 +161,15 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain Christian Tismer Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski Andrew Dalke Sylvain Thenault Nathan Taylor @@ -189,13 +193,15 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -209,7 +215,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński Christian Muirhead James Lan shoma hosaka @@ -219,6 +225,7 @@ Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + w31rd0 Jim Baker Rodrigo Araújo Armin Ronacher @@ -234,12 +241,12 @@ Even Wiik Thomassen jbs soareschen + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -137,7 +137,8 @@ control flow of a function (such as ``while`` and ``try`` constructs) - a value stack where bytecode interpretation pulls object - from and puts results on. + from and puts results on. (``locals_stack_w`` is actually a single + list containing both the local scope and the value stack.) - a reference to the *globals* dictionary, containing module-level name-value bindings @@ -151,10 +152,7 @@ - the class ``PyFrame`` is defined in `pypy/interpreter/pyframe.py`_. -- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcode. - -- nested scope support is added to the ``PyFrame`` class in - `pypy/interpreter/nestedscope.py`_. +- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcodes. .. _Code: @@ -184,12 +182,6 @@ * ``co_name`` name of the code object (often the function name) * ``co_lnotab`` a helper table to compute the line-numbers corresponding to bytecodes -In PyPy, code objects also have the responsibility of creating their Frame_ objects -via the `'create_frame()`` method. With proper parser and compiler support this would -allow to create custom Frame objects extending the execution of functions -in various ways. 
The several Frame_ classes already utilize this flexibility -in order to implement Generators and Nested Scopes. - .. _Function: Function and Method classes diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -1,3 +1,5 @@ +# NOTE: run this script with LANG=en_US.UTF-8 + import py import sys from collections import defaultdict @@ -132,7 +134,7 @@ if show_numbers: print '%5d %s' % (n, name) else: - print name + print ' ' + name if __name__ == '__main__': show_numbers = '-n' in sys.argv diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -91,6 +91,7 @@ .. branch: safe-win-mmap .. branch: boolean-indexing-cleanup .. branch: cpyext-best_base +.. branch: cpyext-int .. branch: fileops2 .. branch: nobold-backtrace diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -56,7 +56,7 @@ interrupted = [] print('--- start ---') thread.start_new_thread(subthread, ()) - for j in range(10): + for j in range(30): if len(done): break print('.') time.sleep(0.25) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -7,7 +7,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.7")', + '__version__': 'space.wrap("0.8")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -19,9 +19,9 @@ _cdata = lltype.nullptr(rffi.CCHARP.TO) def __init__(self, space, cdata, ctype): - from pypy.module._cffi_backend import ctypeprim + from 
pypy.module._cffi_backend import ctypeobj assert lltype.typeOf(cdata) == rffi.CCHARP - assert isinstance(ctype, ctypeprim.W_CType) + assert isinstance(ctype, ctypeobj.W_CType) self.space = space self._cdata = cdata # don't forget keepalive_until_here! self.ctype = ctype @@ -211,7 +211,21 @@ keepalive_until_here(w_value) return # + # A fast path for [0:N] = "somestring". + from pypy.module._cffi_backend import ctypeprim space = self.space + if (space.isinstance_w(w_value, space.w_str) and + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + value = space.str_w(w_value) + if len(value) != length: + raise operationerrfmt(space.w_ValueError, + "need a string of length %d, got %d", + length, len(value)) + copy_string_to_raw(llstr(value), cdata, 0, length) + return + # w_iter = space.iter(w_value) for i in range(length): try: @@ -245,19 +259,22 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray + from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr # if (ct is not self.ctype or not isinstance(ct, ctypeptr.W_CTypePointer) or - ct.ctitem.size <= 0): + (ct.ctitem.size <= 0 and not ct.is_void_ptr)): raise operationerrfmt(space.w_TypeError, "cannot subtract cdata '%s' and cdata '%s'", self.ctype.name, ct.name) # + itemsize = ct.ctitem.size + if itemsize <= 0: itemsize = 1 diff = (rffi.cast(lltype.Signed, self._cdata) - - rffi.cast(lltype.Signed, w_other._cdata)) // ct.ctitem.size + rffi.cast(lltype.Signed, w_other._cdata)) // itemsize return space.wrap(diff) # return self._add_or_sub(w_other, -1) @@ -441,6 +458,7 @@ __getitem__ = interp2app(W_CData.getitem), __setitem__ = interp2app(W_CData.setitem), __add__ = interp2app(W_CData.add), + __radd__ = interp2app(W_CData.add), __sub__ = interp2app(W_CData.sub), __getattr__ = 
interp2app(W_CData.getattr), __setattr__ = interp2app(W_CData.setattr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -34,19 +34,8 @@ datasize = self.size # if datasize < 0: - if (space.isinstance_w(w_init, space.w_list) or - space.isinstance_w(w_init, space.w_tuple)): - length = space.int_w(space.len(w_init)) - elif space.isinstance_w(w_init, space.w_basestring): - # from a string, we add the null terminator - length = space.int_w(space.len(w_init)) + 1 - else: - length = space.getindex_w(w_init, space.w_OverflowError) - if length < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative array length")) - w_init = space.w_None - # + from pypy.module._cffi_backend import misc + w_init, length = misc.get_new_array_length(space, w_init) try: datasize = ovfcheck(length * self.ctitem.size) except OverflowError: diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -2,27 +2,25 @@ Pointers. 
""" -from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror - from rpython.rlib import rposix from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.annlowlevel import llstr, llunicode from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw, copy_unicode_to_raw +from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror from pypy.module._cffi_backend import cdataobj, misc, ctypeprim, ctypevoid from pypy.module._cffi_backend.ctypeobj import W_CType class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] + _attrs_ = ['ctitem', 'can_cast_anything', 'length'] + _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length'] length = -1 def __init__(self, space, size, extra, extra_position, ctitem, could_cast_anything=True): - from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion name, name_position = ctitem.insert_name(extra, extra_position) W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -31,7 +29,6 @@ # - for functions, it is the return type self.ctitem = ctitem self.can_cast_anything = could_cast_anything and ctitem.cast_anything - self.is_struct_ptr = isinstance(ctitem, W_CTypeStructOrUnion) def is_char_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar) @@ -90,8 +87,7 @@ "initializer string is too long for '%s'" " (got %d characters)", self.name, n) - for i in range(n): - cdata[i] = s[i] + copy_string_to_raw(llstr(s), cdata, 0, n) if n != self.length: cdata[n] = '\x00' elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar): @@ -105,8 +101,7 @@ " (got %d characters)", self.name, n) unichardata = rffi.cast(rffi.CWCHARP, cdata) - for i in range(n): - 
unichardata[i] = s[i] + copy_unicode_to_raw(llunicode(s), unichardata, 0, n) if n != self.length: unichardata[n] = u'\x00' else: @@ -157,7 +152,6 @@ return cdataobj.W_CData(self.space, ptrdata, self) def convert_from_object(self, cdata, w_ob): - space = self.space if not isinstance(w_ob, cdataobj.W_CData): raise self._convert_error("cdata pointer", w_ob) other = w_ob.ctype @@ -197,6 +191,7 @@ W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init): + from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctitem = self.ctitem datasize = ctitem.size @@ -204,10 +199,15 @@ raise operationerrfmt(space.w_TypeError, "cannot instantiate ctype '%s' of unknown size", self.name) - if self.is_struct_ptr: + if isinstance(ctitem, W_CTypeStructOrUnion): # 'newp' on a struct-or-union pointer: in this case, we return # a W_CDataPtrToStruct object which has a strong reference # to a W_CDataNewOwning that really contains the structure. + # + if ctitem.with_var_array and not space.is_w(w_init, space.w_None): + datasize = ctitem.convert_struct_from_object( + lltype.nullptr(rffi.CCHARP.TO), w_init, datasize) + # cdatastruct = cdataobj.W_CDataNewOwning(space, datasize, ctitem) cdata = cdataobj.W_CDataPtrToStructOrUnion(space, cdatastruct._cdata, @@ -238,11 +238,15 @@ def add(self, cdata, i): space = self.space ctitem = self.ctitem + itemsize = ctitem.size if ctitem.size < 0: - raise operationerrfmt(space.w_TypeError, + if self.is_void_ptr: + itemsize = 1 + else: + raise operationerrfmt(space.w_TypeError, "ctype '%s' points to items of unknown size", self.name) - p = rffi.ptradd(cdata, i * self.ctitem.size) + p = rffi.ptradd(cdata, i * itemsize) return cdataobj.W_CData(space, p, self) def cast(self, w_ob): @@ -298,7 +302,6 @@ def convert_argument_from_object(self, cdata, w_ob): from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag - space = self.space result = (not isinstance(w_ob, cdataobj.W_CData) and 
self._prepare_pointer_call_argument(w_ob, cdata)) if result == 0: @@ -320,7 +323,8 @@ space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)): + (isinstance(ctype2, W_CTypePtrOrArray) and + isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -9,7 +9,8 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, intmask -from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._cffi_backend import cdataobj, ctypeprim, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -17,12 +18,13 @@ class W_CTypeStructOrUnion(W_CType): _immutable_fields_ = ['alignment?', 'fields_list?', 'fields_dict?', - 'custom_field_pos?'] + 'custom_field_pos?', 'with_var_array?'] # fields added by complete_struct_or_union(): alignment = -1 fields_list = None fields_dict = None custom_field_pos = False + with_var_array = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) @@ -90,12 +92,16 @@ pass def convert_from_object(self, cdata, w_ob): - space = self.space - if self._copy_from_same(cdata, w_ob): - return + if not self._copy_from_same(cdata, w_ob): + self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) + @jit.look_inside_iff( + lambda self, cdata, w_ob, optvarsize: jit.isvirtual(w_ob) + ) + def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) + space = self.space if (space.isinstance_w(w_ob, 
space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) @@ -104,7 +110,9 @@ "too many initializers for '%s' (got %d)", self.name, len(lst_w)) for i in range(len(lst_w)): - self.fields_list[i].write(cdata, lst_w[i]) + optvarsize = self.fields_list[i].write_v(cdata, lst_w[i], + optvarsize) + return optvarsize elif space.isinstance_w(w_ob, space.w_dict): lst_w = space.fixedview(w_ob) @@ -116,11 +124,16 @@ except KeyError: space.raise_key_error(w_key) assert 0 - cf.write(cdata, space.getitem(w_ob, w_key)) + optvarsize = cf.write_v(cdata, space.getitem(w_ob, w_key), + optvarsize) + return optvarsize else: - raise self._convert_error("list or tuple or dict or struct-cdata", - w_ob) + if optvarsize == -1: + msg = "list or tuple or dict or struct-cdata" + else: + msg = "list or tuple or dict" + raise self._convert_error(msg, w_ob) @jit.elidable def _getcfield_const(self, attr): @@ -192,6 +205,37 @@ else: self.ctype.convert_from_object(cdata, w_ob) + def write_v(self, cdata, w_ob, optvarsize): + # a special case for var-sized C99 arrays + from pypy.module._cffi_backend import ctypearray + ct = self.ctype + if isinstance(ct, ctypearray.W_CTypeArray) and ct.length < 0: + space = ct.space + w_ob, varsizelength = misc.get_new_array_length(space, w_ob) + if optvarsize != -1: + # in this mode, the only purpose of this function is to compute + # the real size of the structure from a var-sized C99 array + assert cdata == lltype.nullptr(rffi.CCHARP.TO) + itemsize = ct.ctitem.size + try: + varsize = ovfcheck(itemsize * varsizelength) + size = ovfcheck(self.offset + varsize) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array size would overflow a ssize_t")) + assert size >= 0 + return max(size, optvarsize) + # if 'value' was only an integer, get_new_array_length() returns + # w_ob = space.w_None. 
Detect if this was the case, + # and if so, stop here, leaving the content uninitialized + # (it should be zero-initialized from somewhere else). + if space.is_w(w_ob, space.w_None): + return optvarsize + # + if optvarsize == -1: + self.write(cdata, w_ob) + return optvarsize + def convert_bitfield_to_object(self, cdata): ctype = self.ctype space = ctype.space diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -278,6 +278,22 @@ # ____________________________________________________________ +def get_new_array_length(space, w_value): + if (space.isinstance_w(w_value, space.w_list) or + space.isinstance_w(w_value, space.w_tuple)): + return (w_value, space.int_w(space.len(w_value))) + elif space.isinstance_w(w_value, space.w_basestring): + # from a string, we add the null terminator + return (w_value, space.int_w(space.len(w_value)) + 1) + else: + explicitlength = space.getindex_w(w_value, space.w_OverflowError) + if explicitlength < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative array length")) + return (space.w_None, explicitlength) + +# ____________________________________________________________ + @specialize.arg(0) def _raw_memcopy_tp(TPP, source, dest): # in its own function: LONGLONG may make the whole function jit-opaque diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -158,8 +158,10 @@ fields_list = [] fields_dict = {} custom_field_pos = False + with_var_array = False - for w_field in fields_w: + for i in range(len(fields_w)): + w_field = fields_w[i] field_w = space.fixedview(w_field) if not (2 <= len(field_w) <= 4): raise OperationError(space.w_TypeError, @@ -176,7 +178,11 @@ "duplicate field name '%s'", fname) # if ftype.size < 0: - raise operationerrfmt(space.w_TypeError, + if (isinstance(ftype, 
ctypearray.W_CTypeArray) and fbitsize < 0 + and (i == len(fields_w) - 1 or foffset != -1)): + with_var_array = True + else: + raise operationerrfmt(space.w_TypeError, "field '%s.%s' has ctype '%s' of unknown size", w_ctype.name, fname, ftype.name) # @@ -235,7 +241,8 @@ fields_list.append(fld) fields_dict[fname] = fld - boffset += ftype.size * 8 + if ftype.size >= 0: + boffset += ftype.size * 8 prev_bitfield_size = 0 From noreply at buildbot.pypy.org Mon Nov 11 23:27:20 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 11 Nov 2013 23:27:20 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20131111222720.2B6CB1C02AE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r67961:9a43cf95947f Date: 2013-11-11 12:25 -0800 http://bitbucket.org/pypy/pypy/changeset/9a43cf95947f/ Log: merge default diff too long, truncating to 2000 out of 28759 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. 
to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,9 +26,11 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest to use virtualenv with the resulting pypy-c as the interpreter, you can diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. 
""" assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. 
""" if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -322,7 +349,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +370,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +402,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +434,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +480,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . 
import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = 
ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . 
import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ 
b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + 
prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 
|| (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." 
is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py 
b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # 
xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = 
self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -413,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -431,10 +451,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +486,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, 
tp_ptr) else: @@ -476,6 +505,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +530,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. 
It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py deleted file mode 100644 --- a/lib_pypy/numpy.py +++ /dev/null @@ -1,12 +0,0 @@ -import warnings -import sys -if 'numpypy' not in sys.modules: - warnings.warn( - "The 'numpy' module of PyPy is in-development and not complete. " - "To avoid this warning, write 'import numpypy as numpy'. ", - UserWarning) # XXX is this the best warning type? - -from numpypy import * -import numpypy -__all__ = numpypy.__all__ -del numpypy diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -import core -from core import * -import lib -from lib import * - -from __builtin__ import bool, int, long, float, complex, object, unicode, str -from core import abs, max, min - -__version__ = '1.7.0' - -import os -def get_include(): - """ - Return the directory that contains the NumPy \\*.h header files. - - Extension modules that need to compile against NumPy should use this - function to locate the appropriate include directory. - - Notes - ----- - When using ``distutils``, for example in ``setup.py``. - :: - - import numpy as np - ... - Extension('extension_name', ... - include_dirs=[np.get_include()]) - ... 
- - """ - import numpy - if getattr(numpy, 'show_config', None) is None: - # running from numpy source directory - head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) - return os.path.join(head, '../include') - else: - # using installed numpy core headers - import numpy.core as core - d = os.path.join(os.path.dirname(core.__file__), 'include') - return d - - - -__all__ = ['__version__', 'get_include'] -__all__ += core.__all__ -__all__ += lib.__all__ - -#import sys -#sys.modules.setdefault('numpy', sys.modules['numpypy']) - - diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -import numeric -from numeric import * -import fromnumeric -from fromnumeric import * -import shape_base -from shape_base import * - -from fromnumeric import amax as max, amin as min -from numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -import multiarray as mu -import umath as um -from numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def 
_any(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(xrange(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: - ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - rcount = _count_reduce_items(arr, axis) - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - arr = asanyarray(a) - - # First compute the mean, saving 'rcount' for reuse later - if dtype is None and arr.dtype.kind in ['b','u','i']: - arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True) - else: - arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True) - rcount = _count_reduce_items(arr, axis) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide(arrmean, rcount, - out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean / float(rcount) - - # arr - arrmean - x = arr - arrmean - - # (arr - arrmean) ** 2 - if arr.dtype.kind == 'c': - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - - # add.reduce((arr - arrmean) ** 2, axis) - ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - # add.reduce((arr - arrmean) ** 2, 
axis) / (n - ddof) - if not keepdims and isinstance(rcount, mu.ndarray): - rcount = rcount.squeeze(axis=axis) - rcount -= ddof - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - else: - ret = um.sqrt(ret) - - return ret diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/arrayprint.py +++ /dev/null @@ -1,750 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", "get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -import numerictypes as _nt -from umath import maximum, minimum, absolute, not_equal, isnan, isinf -#from multiarray import format_longfloat, datetime_as_string, datetime_data -from fromnumeric import ravel - - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements - -if sys.version_info[0] >= 3: - from functools import reduce - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): - """ - Set printing 
options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - See Also - -------- - get_printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) - >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... 
suppress=False, threshold=1000, formatter=None) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ - _formatter - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter - -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - For a full description of these options, see `set_printoptions`. 
- - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' - else: - return 'False' - - -def repr_format(x): - return repr(x) - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - formatdict = {'bool' : _boolFormatter, - 'int' : IntegerFormat(data), - 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : LongFloatFormat(precision), - 'complexfloat' : ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat' : LongComplexFormat(precision), - 'datetime' : DatetimeFormat(data), - 'timedelta' : TimedeltaFormat(data), - 'numpystr' : repr_format, - 'str' : str} - - if formatter is not None: - fkeys = [k for k in formatter.keys() if formatter[k] is not None] - if 'all' in 
fkeys: - for key in formatdict.keys(): - formatdict[key] = formatter['all'] - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = formatter['int_kind'] - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = formatter['float_kind'] - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = formatter['complex_kind'] - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = formatter['str_kind'] - for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = formatter[key] - - try: - format_function = a._format - msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \ - "will be removed in 2.1. Use the `formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - # find the right formatting function for the array - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - format_function = formatdict['bool'] - elif issubclass(dtypeobj, _nt.integer): - #if issubclass(dtypeobj, _nt.timedelta64): - # format_function = formatdict['timedelta'] - #else: - format_function = formatdict['int'] - elif issubclass(dtypeobj, _nt.floating): - if hasattr(_nt, 'longfloat') and issubclass(dtypeobj, _nt.longfloat): - format_function = formatdict['longfloat'] - else: - format_function = formatdict['float'] - elif issubclass(dtypeobj, _nt.complexfloating): - if hasattr(_nt, 'clongfloat') and issubclass(dtypeobj, _nt.clongfloat): - format_function = formatdict['longcomplexfloat'] - else: - format_function = formatdict['complexfloat'] - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - format_function = formatdict['numpystr'] - #elif issubclass(dtypeobj, _nt.datetime64): - # format_function = formatdict['datetime'] - else: - format_function = formatdict['str'] - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, 
len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - return lst - -def _convert_arrays(obj): - import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError : if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - msg = "The `_format` attribute is deprecated in Numpy " \ - "2.0 and will be removed in 2.1. Use the " \ - "`formatter` kw instead." 
- import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: From noreply at buildbot.pypy.org Mon Nov 11 23:27:21 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 11 Nov 2013 23:27:21 +0100 (CET) Subject: [pypy-commit] pypy default: some w_self -> self Message-ID: <20131111222721.6072B1C04FF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r67962:261e5f91f5f0 Date: 2013-11-11 12:44 -0800 http://bitbucket.org/pypy/pypy/changeset/261e5f91f5f0/ Log: some w_self -> self diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -52,16 +52,16 @@ from pypy.objspace.std.complextype import complex_typedef as typedef _immutable_fields_ = ['realval', 'imagval'] - def __init__(w_self, realval=0.0, imgval=0.0): - w_self.realval = float(realval) - w_self.imagval = float(imgval) + def __init__(self, realval=0.0, imgval=0.0): + self.realval = float(realval) + self.imagval = float(imgval) - def unwrap(w_self, space): # for tests only - return complex(w_self.realval, w_self.imagval) + def unwrap(self, space): # for tests only + return complex(self.realval, self.imagval) - def __repr__(w_self): + def __repr__(self): """ representation for debugging purposes """ - return "" % (w_self.realval, w_self.imagval) + return "" % (self.realval, self.imagval) def as_tuple(self): return (self.realval, self.imagval) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -27,11 +27,11 @@ typedef = float_typedef - def __init__(w_self, floatval): - w_self.floatval = floatval + def __init__(self, floatval): + self.floatval = floatval - def unwrap(w_self, space): - return w_self.floatval + def unwrap(self, space): + return 
self.floatval def float_w(self, space): return self.floatval diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -21,9 +21,9 @@ from pypy.objspace.std.longtype import long_typedef as typedef _immutable_fields_ = ['longlong'] - def __init__(w_self, value): + def __init__(self, value): assert isinstance(value, r_longlong) - w_self.longlong = value + self.longlong = value @staticmethod def fromint(value): @@ -33,17 +33,17 @@ def frombigint(bigint): return W_SmallLongObject(bigint.tolonglong()) - def asbigint(w_self): - return rbigint.fromrarith_int(w_self.longlong) + def asbigint(self): + return rbigint.fromrarith_int(self.longlong) def longval(self): return self.longlong - def __repr__(w_self): - return '' % w_self.longlong + def __repr__(self): + return '' % self.longlong - def int_w(w_self, space): - a = w_self.longlong + def int_w(self, space): + a = self.longlong b = intmask(a) if b == a: return b @@ -51,8 +51,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to int")) - def uint_w(w_self, space): - a = w_self.longlong + def uint_w(self, space): + a = self.longlong if a < 0: raise OperationError(space.w_ValueError, space.wrap( "cannot convert negative integer to unsigned int")) @@ -63,8 +63,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to unsigned int")) - def bigint_w(w_self, space): - return w_self.asbigint() + def bigint_w(self, space): + return self.asbigint() def float_w(self, space): return float(self.longlong) From noreply at buildbot.pypy.org Mon Nov 11 23:27:22 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 11 Nov 2013 23:27:22 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: most of smalllong's SMM removal Message-ID: <20131111222722.D274D1C08A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: 
remove-intlong-smm Changeset: r67963:3ef7d154db05 Date: 2013-11-11 14:22 -0800 http://bitbucket.org/pypy/pypy/changeset/3ef7d154db05/ Log: most of smalllong's SMM removal diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -62,6 +62,7 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): + # XXX: tofloat is not abstract (SmallLongs) return W_FloatObject(w_longobj.tofloat(space)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -5,6 +5,8 @@ for overflows, something CPython does not do anymore. """ +import operator + from rpython.rlib import jit from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, string_to_int, r_uint) @@ -52,7 +54,6 @@ def _make_descr_binop(opname): # XXX: func_renamer or func_with_new_name? - import operator from rpython.tool.sourcetools import func_renamer op = getattr(operator, opname) @@ -99,9 +100,11 @@ descr_mul, descr_rmul = _make_descr_binop('mul') def _make_descr_cmp(opname): - import operator op = getattr(operator, opname) def f(self, space, w_other): + # XXX: this doesn't belong here, regardless of how we originally set this up. 
blargh + #if isinstance(w_other, W_SmallLongObject): + # return space.newbool(op(space.int_w(self), w_other.longlong)) if not space.isinstance_w(w_other, space.w_int): return space.w_NotImplemented @@ -416,9 +419,9 @@ b = b.lshift(3).or_(rbigint.fromint(tag)) return space.newlong_from_rbigint(b) - def unwrap(self, space): + def int_w(self, space): return int(self.intval) - int_w = unwrap + unwrap = int_w def uint_w(self, space): intval = self.intval @@ -447,7 +450,6 @@ def descr_repr(self, space): res = str(self.intval) return space.wrap(res) - descr_str = func_with_new_name(descr_repr, 'descr_str') def _delegate_Int2Long(space, w_intobj): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -4,7 +4,7 @@ from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import ParseStringError -from rpython.tool.sourcetools import func_with_new_name +from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter import typedef from pypy.interpreter.error import OperationError, operationerrfmt @@ -17,6 +17,23 @@ from pypy.objspace.std.stdtypedef import StdTypeDef +def delegate_other(func): + @func_renamer(func.__name__) + def delegated(self, space, w_other): + if space.isinstance_w(w_other, space.w_int): + w_other = _delegate_Int2Long(space, w_other) + elif not space.isinstance_w(w_other, space.w_long): + return space.w_NotImplemented + # XXX: if a smalllong, delegate to Long? 
+ assert isinstance(w_other, W_AbstractLongObject) + return func(self, space, w_other) + return delegated + +def _delegate_Int2Long(space, w_intobj): + """int-to-long delegation""" + return W_LongObject.fromint(space, w_intobj.int_w(space)) + + class W_AbstractLongObject(W_Object): __slots__ = () @@ -41,6 +58,114 @@ def int(self, space): raise NotImplementedError + def asbigint(self): + raise NotImplementedError + + def descr_long(self, space): + raise NotImplementedError + descr_index = func_with_new_name(descr_long, 'descr_index') + descr_trunc = func_with_new_name(descr_long, 'descr_trunc') + descr_pos = func_with_new_name(descr_long, 'descr_pos') + + # XXX: + def descr_float(self, space): + raise NotImplementedError + descr_neg = func_with_new_name(descr_long, 'descr_neg') + descr_pos = func_with_new_name(descr_long, 'descr_pos') + descr_abs = func_with_new_name(descr_long, 'descr_abs') + descr_nonzero = func_with_new_name(descr_long, 'descr_nonzero') + descr_invert = func_with_new_name(descr_long, 'descr_invert') + + def descr_lt(self, space, w_other): + raise NotImplementedError + descr_le = func_with_new_name(descr_lt, 'descr_le') + descr_eq = func_with_new_name(descr_lt, 'descr_eq') + descr_ne = func_with_new_name(descr_lt, 'descr_ne') + descr_gt = func_with_new_name(descr_lt, 'descr_gt') + descr_ge = func_with_new_name(descr_lt, 'descr_ge') + + descr_add = func_with_new_name(descr_lt, 'descr_add') + descr_radd = func_with_new_name(descr_lt, 'descr_radd') + descr_sub = func_with_new_name(descr_lt, 'descr_sub') + descr_rsub = func_with_new_name(descr_lt, 'descr_rsub') + descr_mul = func_with_new_name(descr_lt, 'descr_mul') + descr_rmul = func_with_new_name(descr_lt, 'descr_rmul') + + descr_and = func_with_new_name(descr_lt, 'descr_and') + descr_rand = func_with_new_name(descr_lt, 'descr_rand') + descr_or = func_with_new_name(descr_lt, 'descr_or') + descr_ror = func_with_new_name(descr_lt, 'descr_ror') + descr_xor = func_with_new_name(descr_lt, 'descr_xor') + 
descr_rxor = func_with_new_name(descr_lt, 'descr_rxor') + + descr_lshift = func_with_new_name(descr_lt, 'descr_lshift') + descr_rshift = func_with_new_name(descr_lt, 'descr_rshift') + + descr_floordiv = func_with_new_name(descr_lt, 'descr_floordiv') + descr_rfloordiv = func_with_new_name(descr_lt, 'descr_rfloordiv') + descr_div = func_with_new_name(descr_lt, 'descr_div') + descr_rdiv = func_with_new_name(descr_lt, 'descr_rdiv') + descr_mod = func_with_new_name(descr_lt, 'descr_mod') + descr_rmod = func_with_new_name(descr_lt, 'descr_rmod') + descr_divmod = func_with_new_name(descr_lt, 'descr_divmod') + descr_rdivmod = func_with_new_name(descr_lt, 'descr_rdivmod') + + def descr_pow(self, space, w_exponent, w_modulus=None): + raise NotImplementedError + descr_rpow = func_with_new_name(descr_pow, 'descr_rpow') + + def descr_format(self, space, w_format_spec): + return newformat.run_formatter(space, w_format_spec, + "format_int_or_long", self, + newformat.LONG_KIND) + def descr_repr(self, space): + return space.wrap(self.asbigint().repr()) + + def descr_str(self, space): + return space.wrap(self.asbigint().str()) + + def descr_hash(self, space): + return space.wrap(self.asbigint().hash()) + + def descr_oct(self, space): + return space.wrap(self.asbigint().oct()) + + def descr_hex(self, space): + return space.wrap(self.asbigint().hex()) + + def descr_getnewargs(self, space): + return space.newtuple([W_LongObject(self.asbigint())]) + + def descr_conjugate(self, space): + return space.long(self) + + def descr_bit_length(self, space): + bigint = space.bigint_w(self) + try: + return space.wrap(bigint.bit_length()) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("too many digits in integer")) + + # XXX: need rtruediv + @delegate_other + def descr_truediv(self, space, w_other): + try: + #f = self.num.truediv(w_other.num) + f = self.asbigint().truediv(w_other.asbigint()) + except ZeroDivisionError: + raise 
operationerrfmt(space.w_ZeroDivisionError, + "long division or modulo by zero") + except OverflowError: + raise operationerrfmt(space.w_OverflowError, + "long/long too large for a float") + return space.newfloat(f) + + @delegate_other + def descr_coerce(self, space, w_other): + # XXX: consider stian's branch where he optimizes long + ints + return space.newtuple([self, w_other]) + class W_LongObject(W_AbstractLongObject): """This is a wrapper of rbigint.""" @@ -111,12 +236,13 @@ except OverflowError: return self.descr_long(space) + def asbigint(self): + return self.num + def __repr__(self): return '' % self.num.tolong() - def descr_conjugate(self, space): - return space.long(self) - + # XXX: make these indirect def descr_get_numerator(self, space): return space.long(self) @@ -129,14 +255,6 @@ def descr_get_imag(self, space): return space.newlong(0) - def descr_get_bit_length(self, space): - bigint = space.bigint_w(self) - try: - return space.wrap(bigint.bit_length()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("too many digits in integer")) - def descr_long(self, space): # long__Long is supposed to do nothing, unless it has a derived # long object, where it should return an exact one. @@ -151,36 +269,23 @@ def descr_float(self, space): return space.newfloat(self.tofloat(space)) - def descr_repr(self, space): - return space.wrap(self.num.repr()) + def _make_descr_cmp(opname): + #from pypy.objspace.std.smalllongobject import W_SmallLongObject + op = getattr(rbigint, opname) + @delegate_other + def descr_impl(self, space, w_other): + ## XXX: these only need explicit SmallLong support whereas + ## everything else would delegate2Long. 
blah blah + #if isinstance(w_other, W_SmallLongObject): + # result = op(self.num, w_other.asbigint()) + #else: + # result = op(self.num, w_other.num) + #return space.newbool(result) - def descr_str(self, space): - return space.wrap(self.num.str()) - - def descr_format(self, space, w_format_spec): - return newformat.run_formatter(space, w_format_spec, - "format_int_or_long", self, - newformat.LONG_KIND) - - def descr_hash(self, space): - return space.wrap(self.num.hash()) - - def descr_coerce(self, space, w_other): - # XXX: consider stian's branch where he optimizes long + ints - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - return space.newtuple([self, w_other]) - - def _make_descr_cmp(opname): - op = getattr(rbigint, opname) - def descr_impl(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - return space.newbool(op(self.num, w_other.num)) + # XXX: if we use self.asbigint then can this live on + # AbstractLong? eek not really, a '_cmp' (_lt) could live on + # it that just did this (without the checks..) 
+ return space.newbool(op(self.num, w_other.asbigint())) return func_with_new_name(descr_impl, "descr_" + opname) descr_lt = _make_descr_cmp('lt') @@ -196,20 +301,14 @@ op = getattr(rbigint, methname) @func_renamer('descr_' + opname) + @delegate_other def descr_binop(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - return W_LongObject(op(self.num, w_other.num)) + return W_LongObject(op(self.num, w_other.asbigint())) @func_renamer('descr_r' + opname) + @delegate_other def descr_rbinop(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - return W_LongObject(op(w_other.num, self.num)) + return W_LongObject(op(w_other.asbigint(), self.num)) return descr_binop, descr_rbinop @@ -232,136 +331,103 @@ descr_abs = _make_descr_unaryop('abs') descr_invert = _make_descr_unaryop('invert') - def descr_oct(self, space): - return space.wrap(self.num.oct()) - - def descr_hex(self, space): - return space.wrap(self.num.hex()) - def descr_nonzero(self, space): return space.newbool(self.num.tobool()) + @delegate_other def descr_lshift(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - # XXX need to replicate some of the logic, to get the errors right - if w_other.num.sign < 0: + if w_other.asbigint().sign < 0: raise operationerrfmt(space.w_ValueError, "negative shift count") try: - shift = w_other.num.toint() + shift = w_other.asbigint().toint() except OverflowError: # b too big raise operationerrfmt(space.w_OverflowError, "shift count too large") return W_LongObject(self.num.lshift(shift)) + @delegate_other def descr_rshift(self, 
space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - # XXX need to replicate some of the logic, to get the errors right - if w_other.num.sign < 0: + if w_other.asbigint().sign < 0: raise operationerrfmt(space.w_ValueError, "negative shift count") try: - shift = w_other.num.toint() + shift = w_other.asbigint().toint() except OverflowError: # b too big # XXX maybe just return 0L instead? raise operationerrfmt(space.w_OverflowError, "shift count too large") return newlong(space, self.num.rshift(shift)) - # XXX: need rtruediv etc - def descr_truediv(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - + @delegate_other + def descr_floordiv(self, space, w_other): try: - f = self.num.truediv(w_other.num) + z = self.num.floordiv(w_other.asbigint()) except ZeroDivisionError: raise operationerrfmt(space.w_ZeroDivisionError, "long division or modulo by zero") - except OverflowError: - raise operationerrfmt(space.w_OverflowError, - "long/long too large for a float") - return space.newfloat(f) + return newlong(space, z) + descr_div = func_with_new_name(descr_floordiv, 'descr_div') - def descr_floordiv(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - + @delegate_other + def descr_mod(self, space, w_other): try: - z = self.num.floordiv(w_other.num) + z = self.num.mod(w_other.asbigint()) except ZeroDivisionError: raise operationerrfmt(space.w_ZeroDivisionError, "long division or modulo by zero") return newlong(space, z) - def descr_div(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = 
_delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - - return self.floordiv(space, w_other) - - def descr_mod(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - + @delegate_other + def descr_divmod(self, space, w_other): try: - z = self.num.mod(w_other.num) - except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "long division or modulo by zero") - return newlong(space, z) - - def descr_divmod(self, space, w_other): - if space.isinstance_w(w_other, space.w_int): - w_other = _delegate_Int2Long(space, w_other) - elif not space.isinstance_w(w_other, space.w_long): - return space.w_NotImplemented - - try: - div, mod = self.num.divmod(w_other.num) + div, mod = self.num.divmod(w_other.asbigint()) except ZeroDivisionError: raise operationerrfmt(space.w_ZeroDivisionError, "long division or modulo by zero") return space.newtuple([newlong(space, div), newlong(space, mod)]) + #@delegate_other # XXX: @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_modulus=None): if space.isinstance_w(w_exponent, space.w_int): w_exponent = _delegate_Int2Long(space, w_exponent) elif not space.isinstance_w(w_exponent, space.w_long): return space.w_NotImplemented - if space.isinstance_w(w_modulus, space.w_int): + assert isinstance(w_exponent, W_AbstractLongObject) + + #if space.is_none(w_modulus): + # from pypy.objspace.std.floatobject import delegate_Long2Float + # self = delegate_Long2Float(space, self) + # w_exponent = delegate_Long2Float(space, w_exponent) + # return space.pow(self, w_exponent, w_modulus) + #elif space.isinstance_w(w_modulus, space.w_int): + if space.is_none(w_modulus): + # XXX need to replicate some of the logic, to get the errors right + if w_exponent.asbigint().sign < 0: + from 
pypy.objspace.std.floatobject import delegate_Long2Float + w_exponent = delegate_Long2Float(space, w_exponent) + return space.pow(self.descr_float(space), w_exponent, space.w_None if w_modulus is None else w_modulus) + return W_LongObject(self.num.pow(w_exponent.asbigint(), None)) + elif space.isinstance_w(w_modulus, space.w_int): w_modulus = _delegate_Int2Long(space, w_modulus) - elif space.is_none(w_modulus): - # XXX need to replicate some of the logic, to get the errors right - if w_exponent.num.sign < 0: - return space.pow(self.descr_float(space), w_exponent, w_modulus) - return W_LongObject(self.num.pow(w_exponent.num, None)) + #elif space.is_none(w_modulus): + # # XXX need to replicate some of the logic, to get the errors right + # if w_exponent.num.sign < 0: + # return space.pow(self.descr_float(space), w_exponent, w_modulus) + # return W_LongObject(self.num.pow(w_exponent.num, None)) elif not space.isinstance_w(w_modulus, space.w_long): return space.w_NotImplemented + assert isinstance(w_modulus, W_AbstractLongObject) - # XXX need to replicate some of the logic, to get the errors right - if w_exponent.num.sign < 0: + if w_exponent.asbigint().sign < 0: raise OperationError( space.w_TypeError, space.wrap( "pow() 2nd argument " "cannot be negative when 3rd argument specified")) try: - return W_LongObject(self.num.pow(w_exponent.num, w_modulus.num)) + return W_LongObject(self.num.pow(w_exponent.asbigint(), + w_modulus.asbigint())) except ValueError: raise OperationError(space.w_ValueError, space.wrap("pow 3rd argument cannot be 0")) @@ -398,11 +464,6 @@ return W_LongObject(bigint) -def _delegate_Int2Long(space, w_intobj): - """int-to-long delegation""" - return W_LongObject.fromint(space, w_intobj.int_w(space)) - - # register implementations of ops that recover int op overflows def recover_with_smalllong(space): # True if there is a chance that a SmallLong would fit when an Int does not @@ -420,7 +481,8 @@ return %(opname)s_ovr(space, w_int1, w_int2) w_long1 = 
_delegate_Int2Long(space, w_int1) w_long2 = _delegate_Int2Long(space, w_int2) - return %(opname)s__Long_Long(space, w_long1, w_long2) + #return %(opname)s__Long_Long(space, w_long1, w_long2) + return w_long1.descr_%(opname)s(space, w_long2) """ % {'opname': opname}, '', 'exec') getattr(model.MM, opname).register(globals()['%s_ovr__Int_Int' % opname], @@ -434,7 +496,8 @@ from pypy.objspace.std.smalllongobject import %(opname)s_ovr return %(opname)s_ovr(space, w_int1) w_long1 = _delegate_Int2Long(space, w_int1) - return %(opname)s__Long(space, w_long1) + #return %(opname)s__Long(space, w_long1) + return w_long1.descr_%(opname)s(space) """ % {'opname': opname} getattr(model.MM, opname).register(globals()['%s_ovr__Int' % opname], @@ -447,12 +510,14 @@ return pow_ovr(space, w_int1, w_int2) w_long1 = _delegate_Int2Long(space, w_int1) w_long2 = _delegate_Int2Long(space, w_int2) - return pow__Long_Long_None(space, w_long1, w_long2, w_none3) + #return pow__Long_Long_None(space, w_long1, w_long2, w_none3) + return w_long1.descr_pow(space, w_long2, w_none3) def pow_ovr__Int_Int_Long(space, w_int1, w_int2, w_long3): w_long1 = _delegate_Int2Long(space, w_int1) w_long2 = _delegate_Int2Long(space, w_int2) - return pow__Long_Long_Long(space, w_long1, w_long2, w_long3) + #return pow__Long_Long_Long(space, w_long1, w_long2, w_long3) + return w_long1.descr_pow(space, w_long2, w_long3) model.MM.pow.register(pow_ovr__Int_Int_None, W_IntObject, W_IntObject, W_NoneObject, order=1) @@ -515,7 +580,7 @@ try: bigint = rbigint.fromstr(s, base) except ParseStringError as e: - raise operationerrfmt(space.w_ValueError, e.msg) + raise OperationError(space.w_ValueError, space.wrap(e.msg)) return newbigint(space, w_longtype, bigint) string_to_w_long._dont_inline_ = True @@ -540,7 +605,7 @@ return w_obj -W_LongObject.typedef = StdTypeDef("long", +W_AbstractLongObject.typedef = StdTypeDef("long", __doc__ = """long(x[, base]) -> integer Convert a string or number to a long integer, if possible. 
A floating @@ -549,66 +614,70 @@ string, use the optional base. It is an error to supply a base when converting a non-string.""", __new__ = interp2app(descr__new__), - conjugate = interp2app(W_LongObject.descr_conjugate), + conjugate = interp2app(W_AbstractLongObject.descr_conjugate), + # XXX: need indirect for these numerator = typedef.GetSetProperty(W_LongObject.descr_get_numerator), denominator = typedef.GetSetProperty(W_LongObject.descr_get_denominator), real = typedef.GetSetProperty(W_LongObject.descr_get_real), imag = typedef.GetSetProperty(W_LongObject.descr_get_imag), - bit_length = interp2app(W_LongObject.descr_get_bit_length), + bit_length = interp2app(W_AbstractLongObject.descr_bit_length), # XXX: likely need indirect everything for SmallLong __int__ = interpindirect2app(W_AbstractLongObject.int), - __long__ = interp2app(W_LongObject.descr_long), - __trunc__ = interp2app(W_LongObject.descr_trunc), - __index__ = interp2app(W_LongObject.descr_index), - __float__ = interp2app(W_LongObject.descr_float), - __repr__ = interp2app(W_LongObject.descr_repr), - __str__ = interp2app(W_LongObject.descr_str), - __format__ = interp2app(W_LongObject.descr_format), + __long__ = interpindirect2app(W_AbstractLongObject.descr_long), + __index__ = interpindirect2app(W_AbstractLongObject.descr_index), + __trunc__ = interpindirect2app(W_AbstractLongObject.descr_trunc), + __float__ = interpindirect2app(W_AbstractLongObject.descr_float), - __hash__ = interp2app(W_LongObject.descr_hash), - __coerce__ = interp2app(W_LongObject.descr_coerce), + __repr__ = interp2app(W_AbstractLongObject.descr_repr), + __str__ = interp2app(W_AbstractLongObject.descr_str), + __format__ = interp2app(W_AbstractLongObject.descr_format), - __lt__ = interp2app(W_LongObject.descr_lt), - __le__ = interp2app(W_LongObject.descr_le), - __eq__ = interp2app(W_LongObject.descr_eq), - __ne__ = interp2app(W_LongObject.descr_ne), - __gt__ = interp2app(W_LongObject.descr_gt), - __ge__ = 
interp2app(W_LongObject.descr_ge), + __hash__ = interp2app(W_AbstractLongObject.descr_hash), + __coerce__ = interp2app(W_AbstractLongObject.descr_coerce), - __add__ = interp2app(W_LongObject.descr_add), - __radd__ = interp2app(W_LongObject.descr_radd), - __sub__ = interp2app(W_LongObject.descr_sub), - __rsub__ = interp2app(W_LongObject.descr_rsub), - __mul__ = interp2app(W_LongObject.descr_mul), - __rmul__ = interp2app(W_LongObject.descr_rmul), + __lt__ = interpindirect2app(W_AbstractLongObject.descr_lt), + __le__ = interpindirect2app(W_AbstractLongObject.descr_le), + __eq__ = interpindirect2app(W_AbstractLongObject.descr_eq), + __ne__ = interpindirect2app(W_AbstractLongObject.descr_ne), + __gt__ = interpindirect2app(W_AbstractLongObject.descr_gt), + __ge__ = interpindirect2app(W_AbstractLongObject.descr_ge), - __and__ = interp2app(W_LongObject.descr_and), - __rand__ = interp2app(W_LongObject.descr_rand), - __or__ = interp2app(W_LongObject.descr_or), - __ror__ = interp2app(W_LongObject.descr_ror), - __xor__ = interp2app(W_LongObject.descr_xor), - __rxor__ = interp2app(W_LongObject.descr_rxor), + __add__ = interpindirect2app(W_AbstractLongObject.descr_add), + __radd__ = interpindirect2app(W_AbstractLongObject.descr_radd), + __sub__ = interpindirect2app(W_AbstractLongObject.descr_sub), + __rsub__ = interpindirect2app(W_AbstractLongObject.descr_rsub), + __mul__ = interpindirect2app(W_AbstractLongObject.descr_mul), + __rmul__ = interpindirect2app(W_AbstractLongObject.descr_rmul), - __neg__ = interp2app(W_LongObject.descr_neg), - __pos__ = interp2app(W_LongObject.descr_pos), - __abs__ = interp2app(W_LongObject.descr_abs), - __nonzero__ = interp2app(W_LongObject.descr_nonzero), - __invert__ = interp2app(W_LongObject.descr_invert), - __oct__ = interp2app(W_LongObject.descr_oct), - __hex__ = interp2app(W_LongObject.descr_hex), + __and__ = interpindirect2app(W_AbstractLongObject.descr_and), + __rand__ = interpindirect2app(W_AbstractLongObject.descr_rand), + __or__ = 
interpindirect2app(W_AbstractLongObject.descr_or), + __ror__ = interpindirect2app(W_AbstractLongObject.descr_ror), + __xor__ = interpindirect2app(W_AbstractLongObject.descr_xor), + __rxor__ = interpindirect2app(W_AbstractLongObject.descr_rxor), - __lshift__ = interp2app(W_LongObject.descr_lshift), - __rshift__ = interp2app(W_LongObject.descr_rshift), + __neg__ = interpindirect2app(W_AbstractLongObject.descr_neg), + __pos__ = interpindirect2app(W_AbstractLongObject.descr_pos), + __abs__ = interpindirect2app(W_AbstractLongObject.descr_abs), + __nonzero__ = interpindirect2app(W_AbstractLongObject.descr_nonzero), + __invert__ = interpindirect2app(W_AbstractLongObject.descr_invert), - __truediv__ = interp2app(W_LongObject.descr_truediv), - __floordiv__ = interp2app(W_LongObject.descr_floordiv), - __div__ = interp2app(W_LongObject.descr_div), - __mod__ = interp2app(W_LongObject.descr_mod), - __divmod__ = interp2app(W_LongObject.descr_divmod), + __oct__ = interp2app(W_AbstractLongObject.descr_oct), + __hex__ = interp2app(W_AbstractLongObject.descr_hex), - __pow__ = interp2app(W_LongObject.descr_pow), - __rpow__ = interp2app(W_LongObject.descr_rpow), + __lshift__ = interpindirect2app(W_AbstractLongObject.descr_lshift), + __rshift__ = interpindirect2app(W_AbstractLongObject.descr_rshift), - __getnewargs__ = interp2app(W_LongObject.descr_getnewargs), + # XXX: all these need r sides + __truediv__ = interp2app(W_AbstractLongObject.descr_truediv), + __floordiv__ = interpindirect2app(W_AbstractLongObject.descr_floordiv), + __div__ = interpindirect2app(W_AbstractLongObject.descr_div), + __mod__ = interpindirect2app(W_AbstractLongObject.descr_mod), + __divmod__ = interpindirect2app(W_AbstractLongObject.descr_divmod), + + __pow__ = interpindirect2app(W_AbstractLongObject.descr_pow), + __rpow__ = interpindirect2app(W_AbstractLongObject.descr_rpow), + + __getnewargs__ = interp2app(W_AbstractLongObject.descr_getnewargs), ) diff --git a/pypy/objspace/std/model.py 
b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -143,7 +143,7 @@ ] self.typeorder[intobject.W_IntObject] += [ (floatobject.W_FloatObject, floatobject.delegate_Int2Float), - (longobject.W_LongObject, longobject.delegate_Int2Long), +# (longobject.W_LongObject, longobject.delegate_Int2Long), (complexobject.W_ComplexObject, complexobject.delegate_Int2Complex), ] if False and config.objspace.std.withsmalllong: diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -3,22 +3,23 @@ Useful for 32-bit applications manipulating values a bit larger than fits in an 'int'. """ -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all +import operator + +from rpython.rlib.rarithmetic import LONGLONG_BIT, intmask, r_longlong, r_uint +from rpython.rlib.rbigint import rbigint +from rpython.tool.sourcetools import func_with_new_name + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.objspace.std.multimethod import FailedToImplementArgs -from rpython.rlib.rarithmetic import r_longlong, r_int, r_uint -from rpython.rlib.rarithmetic import intmask, LONGLONG_BIT -from rpython.rlib.rbigint import rbigint from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.interpreter.error import OperationError +from pypy.objspace.std.intobject import _delegate_Int2Long LONGLONG_MIN = r_longlong((-1) << (LONGLONG_BIT-1)) class W_SmallLongObject(W_AbstractLongObject): - from pypy.objspace.std.longtype import long_typedef as typedef + _immutable_fields_ = ['longlong'] def __init__(w_self, value): @@ -77,8 +78,331 @@ else: return self + def 
descr_long(self, space): + # XXX: do subclasses never apply here? + return self + descr_index = func_with_new_name(descr_long, 'descr_index') + descr_trunc = func_with_new_name(descr_long, 'descr_trunc') + descr_pos = func_with_new_name(descr_long, 'descr_pos') + + def descr_index(self, space): + return self + + def descr_float(self, space): + return space.newfloat(float(self.longlong)) + + def _make_descr_cmp(opname): + op = getattr(operator, opname) + def descr_impl(self, space, w_other): + if space.isinstance_w(w_other, space.w_int): + result = op(self.longlong, w_other.int_w(space)) + elif not space.isinstance_w(w_other, space.w_long): + return space.w_NotImplemented + elif isinstance(w_other, W_SmallLongObject): + result = op(self.longlong, w_other.longlong) + else: + result = getattr(self.asbigint(), opname)(w_other.num) + return space.newbool(result) + return func_with_new_name(descr_impl, "descr_" + opname) + + descr_lt = _make_descr_cmp('lt') + descr_le = _make_descr_cmp('le') + descr_eq = _make_descr_cmp('eq') + descr_ne = _make_descr_cmp('ne') + descr_gt = _make_descr_cmp('gt') + descr_ge = _make_descr_cmp('ge') + + def _make_descr_binop(func): + # XXX: so if w_other is Long, what do we do? sigh + # how to handle delegation with descr_add on longobject? 
+ opname = func.__name__[1:] + methname = opname + '_' if opname in ('and', 'or') else opname + + def descr_impl(self, space, w_other): + if space.isinstance_w(w_other, space.w_int): + w_other = delegate_Int2SmallLong(space, w_other) + elif not space.isinstance_w(w_other, space.w_long): + return space.w_NotImplemented + elif not isinstance(w_other, W_SmallLongObject): + self = delegate_SmallLong2Long(space, self) + return getattr(space, methname)(self, w_other) + + try: + return func(self, space, w_other) + except OverflowError: + self = delegate_SmallLong2Long(space, self) + w_other = delegate_SmallLong2Long(space, w_other) + return getattr(space, methname)(self, w_other) + + def descr_rimpl(self, space, w_other): + if space.isinstance_w(w_other, space.w_int): + w_other = delegate_Int2SmallLong(space, w_other) + elif not space.isinstance_w(w_other, space.w_long): + return space.w_NotImplemented + elif not isinstance(w_other, W_SmallLongObject): + self = delegate_SmallLong2Long(space, self) + return getattr(space, methname)(w_other, self) + + try: + return func(w_other, space, self) + except OverflowError: + self = delegate_SmallLong2Long(space, self) + w_other = delegate_SmallLong2Long(space, w_other) + return getattr(space, methname)(w_other, self) + + return descr_impl, descr_rimpl + + def _add(self, space, w_other): + x = self.longlong + y = w_other.longlong + z = x + y + if ((z^x)&(z^y)) < 0: + raise OverflowError + return W_SmallLongObject(z) + descr_add, descr_radd = _make_descr_binop(_add) + + def _sub(self, space, w_other): + x = self.longlong + y = w_other.longlong + z = x - y + if ((z^x)&(z^~y)) < 0: + raise OverflowError + return W_SmallLongObject(z) + descr_sub, descr_rsub = _make_descr_binop(_sub) + + def _mul(self, space, w_other): + x = self.longlong + y = w_other.longlong + z = llong_mul_ovf(x, y) + return W_SmallLongObject(z) + descr_mul, descr_rmul = _make_descr_binop(_mul) + + def _floordiv(self, space, w_other): + x = self.longlong + y = 
w_other.longlong + try: + if y == -1 and x == LONGLONG_MIN: + raise OverflowError + z = x // y + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("integer division by zero")) + #except OverflowError: + # raise FailedToImplementArgs(space.w_OverflowError, + # space.wrap("integer division")) + return W_SmallLongObject(z) + descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) + + _div = func_with_new_name(_floordiv, '_div') + descr_div, descr_rdiv = _make_descr_binop(_div) + + def _mod(self, space, w_other): + x = self.longlong + y = w_other.longlong + try: + if y == -1 and x == LONGLONG_MIN: + raise OverflowError + z = x % y + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("integer modulo by zero")) + #except OverflowError: + # raise FailedToImplementArgs(space.w_OverflowError, + # space.wrap("integer modulo")) + return W_SmallLongObject(z) + descr_mod, descr_rmod = _make_descr_binop(_mod) + + def _divmod(self, space, w_other): + x = self.longlong + y = w_other.longlong + try: + if y == -1 and x == LONGLONG_MIN: + raise OverflowError + z = x // y + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("integer divmod by zero")) + #except OverflowError: + # raise FailedToImplementArgs(space.w_OverflowError, + # space.wrap("integer modulo")) + # no overflow possible + m = x % y + return space.newtuple([W_SmallLongObject(z), W_SmallLongObject(m)]) + descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) + + # XXX: + @unwrap_spec(w_modulus=WrappedDefault(None)) + #def descr_pow__SmallLong_Int_SmallLong(self, space, w_exponent, + def descr_pow(self, space, w_exponent, w_modulus=None): + if space.isinstance_w(w_exponent, space.w_long): + self = delegate_SmallLong2Long(space, self) + return space.pow(self, w_exponent, w_modulus) + elif not space.isinstance_w(w_exponent, space.w_int): + return space.w_NotImplemented 
-registerimplementation(W_SmallLongObject) + # XXX: this expects w_exponent as an int o_O + """ + if space.isinstance_w(w_exponent, space.w_int): + w_exponent = delegate_Int2SmallLong(space, w_exponent) + elif not space.isinstance_w(w_exponent, space.w_long): + return space.w_NotImplemented + elif not isinstance(w_exponent, W_SmallLongObject): + self = delegate_SmallLong2Long(space, self) + return space.pow(self, w_exponent, w_modulus) + """ + + if space.is_none(w_modulus): + #return _impl_pow(space, self.longlong, w_exponent) + try: + return _impl_pow(space, self.longlong, w_exponent) + except ValueError: + self = delegate_SmallLong2Float(space, self) + except OverflowError: + self = delegate_SmallLong2Long(space, self) + return space.pow(self, w_exponent, w_modulus) + elif space.isinstance_w(w_modulus, space.w_int): + w_modulus = delegate_Int2SmallLong(space, w_modulus) + elif not space.isinstance_w(w_modulus, space.w_long): + return space.w_NotImplemented + elif not isinstance(w_modulus, W_SmallLongObject): + self = delegate_SmallLong2Long(space, self) + #return space.pow(self, w_modulus, w_modulus) + return space.pow(self, w_exponent, w_modulus) + + z = w_modulus.longlong + if z == 0: + raise OperationError(space.w_ValueError, + space.wrap("pow() 3rd argument cannot be 0")) + try: + return _impl_pow(space, self.longlong, w_exponent, z) + except ValueError: + self = delegate_SmallLong2Float(space, self) + except OverflowError: + self = delegate_SmallLong2Long(space, self) + return space.pow(self, w_exponent, w_modulus) + + # XXX: + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_rpow(self, space, w_exponent, w_modulus=None): + # XXX: blargh + if space.isinstance_w(w_exponent, space.w_int): + w_exponent = _delegate_Int2Long(space, w_exponent) + elif not space.isinstance_w(w_exponent, space.w_long): + return space.w_NotImplemented + return space.pow(w_exponent, self, w_modulus) + + #def descr_lshift__SmallLong_Int(space, w_small1, w_int2): + def 
descr_lshift(self, space, w_other): + if space.isinstance_w(w_other, space.w_long): + self = delegate_SmallLong2Long(space, self) + w_other = delegate_SmallLong2Long(space, w_other) + return space.lshift(self, w_other) + elif not space.isinstance_w(w_other, space.w_int): + return space.w_NotImplemented + + a = self.longlong + b = w_other.intval + if r_uint(b) < LONGLONG_BIT: # 0 <= b < LONGLONG_BIT + try: + c = a << b + if a != (c >> b): + raise OverflowError + except OverflowError: + #raise FailedToImplementArgs(space.w_OverflowError, + # space.wrap("integer left shift")) + self = delegate_SmallLong2Long(space, self) + w_other = _delegate_Int2Long(space, w_other) + return space.lshift(self, w_other) + return W_SmallLongObject(c) + if b < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift count")) + else: #b >= LONGLONG_BIT + if a == 0: + return self + #raise FailedToImplementArgs(space.w_OverflowError, + # space.wrap("integer left shift")) + self = delegate_SmallLong2Long(space, self) + w_other = _delegate_Int2Long(space, w_other) + return space.lshift(self, w_other) + + def descr_rshift(self, space, w_other): + if space.isinstance_w(w_other, space.w_long): + self = delegate_SmallLong2Long(space, self) + w_other = delegate_SmallLong2Long(space, w_other) + return space.rshift(self, w_other) + elif not space.isinstance_w(w_other, space.w_int): + return space.w_NotImplemented + + a = self.longlong + b = w_other.intval + if r_uint(b) >= LONGLONG_BIT: # not (0 <= b < LONGLONG_BIT) + if b < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift count")) + else: # b >= LONGLONG_BIT + if a == 0: + return self + if a < 0: + a = -1 + else: + a = 0 + else: + a = a >> b + return W_SmallLongObject(a) + + def _and(self, space, w_other): + a = self.longlong + b = w_other.longlong + res = a & b + return W_SmallLongObject(res) + descr_and, descr_rand = _make_descr_binop(_and) + + def _xor(self, space, w_other): + a = self.longlong + b = 
w_other.longlong + res = a ^ b + return W_SmallLongObject(res) + descr_xor, descr_rxor = _make_descr_binop(_xor) + + def _or(self, space, w_other): + a = self.longlong + b = w_other.longlong + res = a | b + return W_SmallLongObject(res) + descr_or, descr_ror = _make_descr_binop(_or) + + def descr_neg(self, space): + a = self.longlong + try: + if a == LONGLONG_MIN: + raise OverflowError + x = -a + except OverflowError: + return space.neg(delegate_SmallLong2Long(self)) + #raise FailedToImplementArgs(space.w_OverflowError, + # space.wrap("integer negation")) + return W_SmallLongObject(x) + #get_negint = neg__SmallLong + + #def descr_pos(self, space): + # return self + + def descr_abs(self, space): + if self.longlong >= 0: + return self + else: + #return get_negint(space, self) + return self.descr_neg(space) + + def descr_nonzero(self, space): + return space.newbool(bool(self.longlong)) + + def descr_invert(self, space): + x = self.longlong + a = ~x + return W_SmallLongObject(a) + # ____________________________________________________________ @@ -112,7 +436,8 @@ return W_SmallLongObject(r_longlong(space.is_true(w_bool))) def delegate_Int2SmallLong(space, w_int): - return W_SmallLongObject(r_longlong(w_int.intval)) + #return W_SmallLongObject(r_longlong(w_int.intval)) + return W_SmallLongObject(r_longlong(w_int.int_w(space))) def delegate_SmallLong2Long(space, w_small): return W_LongObject(w_small.asbigint()) @@ -123,194 +448,32 @@ def delegate_SmallLong2Complex(space, w_small): return space.newcomplex(float(w_small.longlong), 0.0) - -def long__SmallLong(space, w_value): - return w_value - -def index__SmallLong(space, w_value): - return w_value - -def float__SmallLong(space, w_value): - return space.newfloat(float(w_value.longlong)) - -def lt__SmallLong_SmallLong(space, w_small1, w_small2): - return space.newbool(w_small1.longlong < w_small2.longlong) -def le__SmallLong_SmallLong(space, w_small1, w_small2): - return space.newbool(w_small1.longlong <= w_small2.longlong) 
-def eq__SmallLong_SmallLong(space, w_small1, w_small2): - return space.newbool(w_small1.longlong == w_small2.longlong) -def ne__SmallLong_SmallLong(space, w_small1, w_small2): - return space.newbool(w_small1.longlong != w_small2.longlong) -def gt__SmallLong_SmallLong(space, w_small1, w_small2): - return space.newbool(w_small1.longlong > w_small2.longlong) -def ge__SmallLong_SmallLong(space, w_small1, w_small2): - return space.newbool(w_small1.longlong >= w_small2.longlong) - -def lt__SmallLong_Long(space, w_small1, w_long2): - return space.newbool(w_small1.asbigint().lt(w_long2.num)) -def le__SmallLong_Long(space, w_small1, w_long2): - return space.newbool(w_small1.asbigint().le(w_long2.num)) -def eq__SmallLong_Long(space, w_small1, w_long2): - return space.newbool(w_small1.asbigint().eq(w_long2.num)) -def ne__SmallLong_Long(space, w_small1, w_long2): - return space.newbool(w_small1.asbigint().ne(w_long2.num)) -def gt__SmallLong_Long(space, w_small1, w_long2): - return space.newbool(w_small1.asbigint().gt(w_long2.num)) -def ge__SmallLong_Long(space, w_small1, w_long2): - return space.newbool(w_small1.asbigint().ge(w_long2.num)) - -def lt__Long_SmallLong(space, w_long1, w_small2): - return space.newbool(w_long1.num.lt(w_small2.asbigint())) -def le__Long_SmallLong(space, w_long1, w_small2): - return space.newbool(w_long1.num.le(w_small2.asbigint())) -def eq__Long_SmallLong(space, w_long1, w_small2): - return space.newbool(w_long1.num.eq(w_small2.asbigint())) -def ne__Long_SmallLong(space, w_long1, w_small2): - return space.newbool(w_long1.num.ne(w_small2.asbigint())) -def gt__Long_SmallLong(space, w_long1, w_small2): - return space.newbool(w_long1.num.gt(w_small2.asbigint())) -def ge__Long_SmallLong(space, w_long1, w_small2): - return space.newbool(w_long1.num.ge(w_small2.asbigint())) - -def lt__SmallLong_Int(space, w_small1, w_int2): - return space.newbool(w_small1.longlong < w_int2.intval) -def le__SmallLong_Int(space, w_small1, w_int2): - return 
space.newbool(w_small1.longlong <= w_int2.intval) -def eq__SmallLong_Int(space, w_small1, w_int2): - return space.newbool(w_small1.longlong == w_int2.intval) -def ne__SmallLong_Int(space, w_small1, w_int2): - return space.newbool(w_small1.longlong != w_int2.intval) -def gt__SmallLong_Int(space, w_small1, w_int2): - return space.newbool(w_small1.longlong > w_int2.intval) -def ge__SmallLong_Int(space, w_small1, w_int2): - return space.newbool(w_small1.longlong >= w_int2.intval) - -def lt__Int_SmallLong(space, w_int1, w_small2): - return space.newbool(w_int1.intval < w_small2.longlong) -def le__Int_SmallLong(space, w_int1, w_small2): - return space.newbool(w_int1.intval <= w_small2.longlong) -def eq__Int_SmallLong(space, w_int1, w_small2): - return space.newbool(w_int1.intval == w_small2.longlong) -def ne__Int_SmallLong(space, w_int1, w_small2): - return space.newbool(w_int1.intval != w_small2.longlong) -def gt__Int_SmallLong(space, w_int1, w_small2): - return space.newbool(w_int1.intval > w_small2.longlong) -def ge__Int_SmallLong(space, w_int1, w_small2): - return space.newbool(w_int1.intval >= w_small2.longlong) - - -#hash: default implementation via Longs (a bit messy) - -def add__SmallLong_SmallLong(space, w_small1, w_small2): - x = w_small1.longlong - y = w_small2.longlong - try: - z = x + y - if ((z^x)&(z^y)) < 0: - raise OverflowError - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer addition")) - return W_SmallLongObject(z) - def add_ovr(space, w_int1, w_int2): x = r_longlong(w_int1.intval) y = r_longlong(w_int2.intval) return W_SmallLongObject(x + y) -def sub__SmallLong_SmallLong(space, w_small1, w_small2): - x = w_small1.longlong - y = w_small2.longlong - try: - z = x - y - if ((z^x)&(z^~y)) < 0: - raise OverflowError - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer subtraction")) - return W_SmallLongObject(z) - def sub_ovr(space, w_int1, w_int2): x = 
r_longlong(w_int1.intval) y = r_longlong(w_int2.intval) return W_SmallLongObject(x - y) -def mul__SmallLong_SmallLong(space, w_small1, w_small2): - x = w_small1.longlong - y = w_small2.longlong - try: - z = llong_mul_ovf(x, y) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer multiplication")) - return W_SmallLongObject(z) - def mul_ovr(space, w_int1, w_int2): x = r_longlong(w_int1.intval) y = r_longlong(w_int2.intval) return W_SmallLongObject(x * y) -#truediv: default implementation via Longs - -def floordiv__SmallLong_SmallLong(space, w_small1, w_small2): - x = w_small1.longlong - y = w_small2.longlong - try: - if y == -1 and x == LONGLONG_MIN: - raise OverflowError - z = x // y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer division by zero")) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer division")) - return W_SmallLongObject(z) -div__SmallLong_SmallLong = floordiv__SmallLong_SmallLong - def floordiv_ovr(space, w_int1, w_int2): x = r_longlong(w_int1.intval) y = r_longlong(w_int2.intval) return W_SmallLongObject(x // y) div_ovr = floordiv_ovr -def mod__SmallLong_SmallLong(space, w_small1, w_small2): - x = w_small1.longlong - y = w_small2.longlong - try: - if y == -1 and x == LONGLONG_MIN: - raise OverflowError - z = x % y - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer modulo by zero")) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer modulo")) - return W_SmallLongObject(z) - def mod_ovr(space, w_int1, w_int2): x = r_longlong(w_int1.intval) y = r_longlong(w_int2.intval) return W_SmallLongObject(x % y) -def divmod__SmallLong_SmallLong(space, w_small1, w_small2): - x = w_small1.longlong - y = w_small2.longlong - try: - if y == -1 and x == LONGLONG_MIN: - raise OverflowError - z = x // y - except 
ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer divmod by zero")) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer modulo")) - # no overflow possible - m = x % y - return space.newtuple([W_SmallLongObject(z), W_SmallLongObject(m)]) - def divmod_ovr(space, w_int1, w_int2): return space.newtuple([div_ovr(space, w_int1, w_int2), mod_ovr(space, w_int1, w_int2)]) @@ -322,9 +485,7 @@ raise OperationError(space.w_TypeError, space.wrap("pow() 2nd argument " "cannot be negative when 3rd argument specified")) - ## bounce it, since it always returns float - raise FailedToImplementArgs(space.w_ValueError, - space.wrap("integer exponentiation")) + raise ValueError temp = iv ix = r_longlong(1) try: @@ -342,20 +503,10 @@ if iz: ix = ix % iz except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer exponentiation")) + # XXX: + raise OverflowError return W_SmallLongObject(ix) -def pow__SmallLong_Int_SmallLong(space, w_small1, w_int2, w_small3): - z = w_small3.longlong - if z == 0: - raise OperationError(space.w_ValueError, - space.wrap("pow() 3rd argument cannot be 0")) - return _impl_pow(space, w_small1.longlong, w_int2, z) - -def pow__SmallLong_Int_None(space, w_small1, w_int2, _): - return _impl_pow(space, w_small1.longlong, w_int2) - def pow_ovr(space, w_int1, w_int2): try: return _impl_pow(space, r_longlong(w_int1.intval), w_int2) @@ -365,66 +516,15 @@ w_b = W_LongObject.fromint(space, w_int2.intval) return longobject.pow__Long_Long_None(space, w_a, w_b, space.w_None) -def neg__SmallLong(space, w_small): - a = w_small.longlong - try: - if a == LONGLONG_MIN: - raise OverflowError - x = -a - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer negation")) - return W_SmallLongObject(x) -get_negint = neg__SmallLong - def neg_ovr(space, w_int): a = r_longlong(w_int.intval) return W_SmallLongObject(-a) - -def 
pos__SmallLong(space, w_small): - return w_small - -def abs__SmallLong(space, w_small): - if w_small.longlong >= 0: - return w_small - else: - return get_negint(space, w_small) - def abs_ovr(space, w_int): a = r_longlong(w_int.intval) if a < 0: a = -a return W_SmallLongObject(a) -def nonzero__SmallLong(space, w_small): - return space.newbool(bool(w_small.longlong)) - -def invert__SmallLong(space, w_small): - x = w_small.longlong - a = ~x - return W_SmallLongObject(a) - -def lshift__SmallLong_Int(space, w_small1, w_int2): - a = w_small1.longlong - b = w_int2.intval - if r_uint(b) < LONGLONG_BIT: # 0 <= b < LONGLONG_BIT - try: - c = a << b - if a != (c >> b): - raise OverflowError - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer left shift")) - return W_SmallLongObject(c) - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - else: #b >= LONGLONG_BIT - if a == 0: - return w_small1 - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer left shift")) - def lshift_ovr(space, w_int1, w_int2): a = r_longlong(w_int1.intval) try: @@ -434,45 +534,3 @@ w_a = W_LongObject.fromint(space, w_int1.intval) w_b = W_LongObject.fromint(space, w_int2.intval) return longobject.lshift__Long_Long(space, w_a, w_b) - -def rshift__SmallLong_Int(space, w_small1, w_int2): - a = w_small1.longlong - b = w_int2.intval - if r_uint(b) >= LONGLONG_BIT: # not (0 <= b < LONGLONG_BIT) - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - else: # b >= LONGLONG_BIT - if a == 0: - return w_small1 - if a < 0: - a = -1 - else: - a = 0 - else: - a = a >> b - return W_SmallLongObject(a) - -def and__SmallLong_SmallLong(space, w_small1, w_small2): - a = w_small1.longlong - b = w_small2.longlong - res = a & b - return W_SmallLongObject(res) - -def xor__SmallLong_SmallLong(space, w_small1, w_small2): - a = w_small1.longlong - b = w_small2.longlong - res = a ^ b - 
return W_SmallLongObject(res) - -def or__SmallLong_SmallLong(space, w_small1, w_small2): - a = w_small1.longlong - b = w_small2.longlong - res = a | b - return W_SmallLongObject(res) - -#oct: default implementation via Longs -#hex: default implementation via Longs -#getnewargs: default implementation via Longs - -register_all(vars()) From noreply at buildbot.pypy.org Mon Nov 11 23:27:23 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 11 Nov 2013 23:27:23 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: comment Message-ID: <20131111222723.F2E851C0E1C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r67964:5589ab3cae12 Date: 2013-11-11 14:26 -0800 http://bitbucket.org/pypy/pypy/changeset/5589ab3cae12/ Log: comment diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -406,6 +406,8 @@ if w_exponent.asbigint().sign < 0: from pypy.objspace.std.floatobject import delegate_Long2Float w_exponent = delegate_Long2Float(space, w_exponent) + # XXX: hack around multimethod annoyances for now (when + # w_modulus=None) return space.pow(self.descr_float(space), w_exponent, space.w_None if w_modulus is None else w_modulus) return W_LongObject(self.num.pow(w_exponent.asbigint(), None)) elif space.isinstance_w(w_modulus, space.w_int): From noreply at buildbot.pypy.org Mon Nov 11 23:27:25 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 11 Nov 2013 23:27:25 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20131111222725.1B1801C1041@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r67965:ab62397eb222 Date: 2013-11-11 14:26 -0800 http://bitbucket.org/pypy/pypy/changeset/ab62397eb222/ Log: merge default diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ 
b/pypy/objspace/std/complexobject.py @@ -52,16 +52,16 @@ from pypy.objspace.std.complextype import complex_typedef as typedef _immutable_fields_ = ['realval', 'imagval'] - def __init__(w_self, realval=0.0, imgval=0.0): - w_self.realval = float(realval) - w_self.imagval = float(imgval) + def __init__(self, realval=0.0, imgval=0.0): + self.realval = float(realval) + self.imagval = float(imgval) - def unwrap(w_self, space): # for tests only - return complex(w_self.realval, w_self.imagval) + def unwrap(self, space): # for tests only + return complex(self.realval, self.imagval) - def __repr__(w_self): + def __repr__(self): """ representation for debugging purposes """ - return "" % (w_self.realval, w_self.imagval) + return "" % (self.realval, self.imagval) def as_tuple(self): return (self.realval, self.imagval) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -27,11 +27,11 @@ typedef = float_typedef - def __init__(w_self, floatval): - w_self.floatval = floatval + def __init__(self, floatval): + self.floatval = floatval - def unwrap(w_self, space): - return w_self.floatval + def unwrap(self, space): + return self.floatval def float_w(self, space): return self.floatval diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -22,9 +22,9 @@ _immutable_fields_ = ['longlong'] - def __init__(w_self, value): + def __init__(self, value): assert isinstance(value, r_longlong) - w_self.longlong = value + self.longlong = value @staticmethod def fromint(value): @@ -34,17 +34,17 @@ def frombigint(bigint): return W_SmallLongObject(bigint.tolonglong()) - def asbigint(w_self): - return rbigint.fromrarith_int(w_self.longlong) + def asbigint(self): + return rbigint.fromrarith_int(self.longlong) def longval(self): return self.longlong - def __repr__(w_self): - 
return '' % w_self.longlong + def __repr__(self): + return '' % self.longlong - def int_w(w_self, space): - a = w_self.longlong + def int_w(self, space): + a = self.longlong b = intmask(a) if b == a: return b @@ -52,8 +52,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to int")) - def uint_w(w_self, space): - a = w_self.longlong + def uint_w(self, space): + a = self.longlong if a < 0: raise OperationError(space.w_ValueError, space.wrap( "cannot convert negative integer to unsigned int")) @@ -64,8 +64,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to unsigned int")) - def bigint_w(w_self, space): - return w_self.asbigint() + def bigint_w(self, space): + return self.asbigint() def float_w(self, space): return float(self.longlong) From noreply at buildbot.pypy.org Tue Nov 12 02:45:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 02:45:16 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix some numpy dtype creation from tuple cases Message-ID: <20131112014516.0C9F81C00B3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67966:5dc341236760 Date: 2013-11-11 20:37 -0500 http://bitbucket.org/pypy/pypy/changeset/5dc341236760/ Log: test/fix some numpy dtype creation from tuple cases diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -382,7 +382,14 @@ elif space.isinstance_w(w_dtype, space.w_list): return dtype_from_list(space, w_dtype) elif space.isinstance_w(w_dtype, space.w_tuple): - return descr__new__(space, w_subtype, space.getitem(w_dtype, space.wrap(0)), w_align, w_copy, w_shape=space.getitem(w_dtype, space.wrap(1))) + w_dtype0 = space.getitem(w_dtype, space.wrap(0)) + w_dtype1 = space.getitem(w_dtype, space.wrap(1)) + subdtype = descr__new__(space, w_subtype, w_dtype0, w_align, w_copy) + assert 
isinstance(subdtype, W_Dtype) + if subdtype.get_size() == 0: + name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) + return descr__new__(space, w_subtype, space.wrap(name), w_align, w_copy) + return descr__new__(space, w_subtype, w_dtype0, w_align, w_copy, w_shape=w_dtype1) elif space.isinstance_w(w_dtype, space.w_dict): return dtype_from_dict(space, w_dtype) for dtype in cache.builtin_dtypes: diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -20,7 +20,7 @@ for t in types: globals()[t] = dtype(t).type -types = ['bool', 'int', 'float', 'complex', 'str', 'unicode'] +types = ['bool', 'int', 'float', 'complex', 'str', 'string', 'unicode'] for t in types: globals()[t + '_'] = dtype(t).type del types diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -46,6 +46,18 @@ assert 'data type not understood' in str(exc.value) raises(KeyError, 'dtype(int)["asdasd"]') + def test_dtype_from_tuple(self): + import numpy as np + d = np.dtype((np.int64, 4)) + assert d == np.dtype(('i8', (4,))) + assert d.shape == (4,) + d = np.dtype((np.string_, 4)) + assert d == np.dtype('S4') + assert d.shape == () + d = np.dtype(('S', 4)) + assert d == np.dtype('S4') + assert d.shape == () + def test_dtype_eq(self): from numpypy import dtype From noreply at buildbot.pypy.org Tue Nov 12 08:30:50 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 12 Nov 2013 08:30:50 +0100 (CET) Subject: [pypy-commit] pypy default: update project-ideas Message-ID: <20131112073050.ACD621C0206@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r67967:c8cb82052fbf Date: 2013-11-12 09:30 +0200 http://bitbucket.org/pypy/pypy/changeset/c8cb82052fbf/ Log: update project-ideas diff --git 
a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -158,6 +158,8 @@ Embedding PyPy ---------------------------------------- +Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ + Being able to embed PyPy, say with its own limited C API, would be useful. But here is the most interesting variant, straight from EuroPython live discussion :-) We can have a generic "libpypy.so" that @@ -166,6 +168,8 @@ exported. This would give us a one-size-fits-all generic .so file to be imported by any application that wants to load .so files :-) +.. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html + Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- From noreply at buildbot.pypy.org Tue Nov 12 08:46:33 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 12 Nov 2013 08:46:33 +0100 (CET) Subject: [pypy-commit] pypy default: this has been done Message-ID: <20131112074633.9BFF31C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r67968:83ebf63bc891 Date: 2013-11-12 09:45 +0200 http://bitbucket.org/pypy/pypy/changeset/83ebf63bc891/ Log: this has been done diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -90,9 +90,6 @@ collectors can be written for specialized purposes, or even various experiments can be done for the general purpose. 
Examples: -* An incremental garbage collector that has specified maximal pause times, - crucial for games - * A garbage collector that compact memory better for mobile devices * A concurrent garbage collector (a lot of work) From noreply at buildbot.pypy.org Tue Nov 12 08:47:41 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 08:47:41 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: test/fix some numpy dtype creation from tuple cases Message-ID: <20131112074741.600761C019D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.2.x Changeset: r67969:7b5eb1c5a0d1 Date: 2013-11-11 20:37 -0500 http://bitbucket.org/pypy/pypy/changeset/7b5eb1c5a0d1/ Log: test/fix some numpy dtype creation from tuple cases (transplanted from 5dc3412367605bff81ba8a4bfa7372670257bc77) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -382,7 +382,14 @@ elif space.isinstance_w(w_dtype, space.w_list): return dtype_from_list(space, w_dtype) elif space.isinstance_w(w_dtype, space.w_tuple): - return descr__new__(space, w_subtype, space.getitem(w_dtype, space.wrap(0)), w_align, w_copy, w_shape=space.getitem(w_dtype, space.wrap(1))) + w_dtype0 = space.getitem(w_dtype, space.wrap(0)) + w_dtype1 = space.getitem(w_dtype, space.wrap(1)) + subdtype = descr__new__(space, w_subtype, w_dtype0, w_align, w_copy) + assert isinstance(subdtype, W_Dtype) + if subdtype.get_size() == 0: + name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) + return descr__new__(space, w_subtype, space.wrap(name), w_align, w_copy) + return descr__new__(space, w_subtype, w_dtype0, w_align, w_copy, w_shape=w_dtype1) elif space.isinstance_w(w_dtype, space.w_dict): return dtype_from_dict(space, w_dtype) for dtype in cache.builtin_dtypes: diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- 
a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -20,7 +20,7 @@ for t in types: globals()[t] = dtype(t).type -types = ['bool', 'int', 'float', 'complex', 'str', 'unicode'] +types = ['bool', 'int', 'float', 'complex', 'str', 'string', 'unicode'] for t in types: globals()[t + '_'] = dtype(t).type del types diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -46,6 +46,18 @@ assert 'data type not understood' in str(exc.value) raises(KeyError, 'dtype(int)["asdasd"]') + def test_dtype_from_tuple(self): + import numpy as np + d = np.dtype((np.int64, 4)) + assert d == np.dtype(('i8', (4,))) + assert d.shape == (4,) + d = np.dtype((np.string_, 4)) + assert d == np.dtype('S4') + assert d.shape == () + d = np.dtype(('S', 4)) + assert d == np.dtype('S4') + assert d.shape == () + def test_dtype_eq(self): from numpypy import dtype From noreply at buildbot.pypy.org Tue Nov 12 12:42:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 12:42:58 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Add os.startfile() using cffi. Message-ID: <20131112114258.E92D71C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67970:4a3ba24a561a Date: 2013-11-12 12:42 +0100 http://bitbucket.org/pypy/pypy/changeset/4a3ba24a561a/ Log: Add os.startfile() using cffi. 
diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -27,6 +27,7 @@ 'popen2': 'app_posix.popen2', 'popen3': 'app_posix.popen3', 'popen4': 'app_posix.popen4', + 'startfile': 'app_startfile.startfile', }) if hasattr(os, 'wait'): diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/posix/app_startfile.py @@ -0,0 +1,44 @@ + +class CFFIWrapper(object): + def __init__(self): + import cffi + ffi = cffi.FFI() + ffi.cdef(""" + HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT); + HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT); + DWORD GetLastError(void); + """) + self.NULL = ffi.NULL + self.cast = ffi.cast + self.libK = ffi.dlopen("Kernel32.dll") + self.libS = ffi.dlopen("Shell32.dll") + self.SW_SHOWNORMAL = 1 + +_cffi_wrapper = None + + +def startfile(filepath, operation=None): + global _cffi_wrapper + if _cffi_wrapper is None: + _cffi_wrapper = CFFIWrapper() + w = _cffi_wrapper + # + if operation is None: + operation = w.NULL + if isinstance(filepath, str): + if isinstance(operation, unicode): + operation = operation.encode("ascii") + rc = w.libS.ShellExecuteA(w.NULL, operation, filepath, + w.NULL, w.NULL, w.SW_SHOWNORMAL) + elif isinstance(filepath, unicode): + if isinstance(operation, str): + operation = operation.decode("ascii") + rc = w.libS.ShellExecuteW(w.NULL, operation, filepath, + w.NULL, w.NULL, w.SW_SHOWNORMAL) + else: + raise TypeError("argument 1 must be str or unicode") + rc = int(w.cast("uintptr_t", rc)) + if rc <= 32: + # sorry, no way to get the error message in less than one page of code + code = w.libK.GetLastError() + raise WindowsError(code, "Error %s" % code, filepath) From noreply at buildbot.pypy.org Tue Nov 12 12:44:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 12:44:02 +0100 (CET) Subject: 
[pypy-commit] pypy release-2.2.x: Add the NOT_RPYTHON comment at the top of the file, although that might Message-ID: <20131112114402.48C7C1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67971:7adf80f17ffb Date: 2013-11-12 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/7adf80f17ffb/ Log: Add the NOT_RPYTHON comment at the top of the file, although that might not have any effect any more. diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py --- a/pypy/module/posix/app_startfile.py +++ b/pypy/module/posix/app_startfile.py @@ -1,3 +1,4 @@ +# NOT_RPYTHON class CFFIWrapper(object): def __init__(self): From noreply at buildbot.pypy.org Tue Nov 12 12:51:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 12:51:49 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Found out that FormatError() is exposed via _rawffi. Would you believe. Message-ID: <20131112115149.80A191C0206@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67972:f262c7a6776c Date: 2013-11-12 12:51 +0100 http://bitbucket.org/pypy/pypy/changeset/f262c7a6776c/ Log: Found out that FormatError() is exposed via _rawffi. Would you believe. 
diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py --- a/pypy/module/posix/app_startfile.py +++ b/pypy/module/posix/app_startfile.py @@ -40,6 +40,10 @@ raise TypeError("argument 1 must be str or unicode") rc = int(w.cast("uintptr_t", rc)) if rc <= 32: - # sorry, no way to get the error message in less than one page of code code = w.libK.GetLastError() - raise WindowsError(code, "Error %s" % code, filepath) + try: + import _rawffi + msg = _rawffi.FormatError(code) + except ImportError: + msg = 'Error %s' % code + raise WindowsError(code, msg, filepath) From noreply at buildbot.pypy.org Tue Nov 12 13:48:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 13:48:33 +0100 (CET) Subject: [pypy-commit] cffi default: Add ffi.getwinerror(). Message-ID: <20131112124833.46C001C07D2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1412:8a16eff7850c Date: 2013-11-12 13:48 +0100 http://bitbucket.org/cffi/cffi/changeset/8a16eff7850c/ Log: Add ffi.getwinerror(). 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5227,6 +5227,9 @@ {"set_errno", b_set_errno, METH_VARARGS}, {"newp_handle", b_newp_handle, METH_VARARGS}, {"from_handle", b_from_handle, METH_O}, +#ifdef MS_WIN32 + {"getwinerror", b_getwinerror, METH_VARARGS}, +#endif {"_get_types", b__get_types, METH_NOARGS}, {"_testfunc", b__testfunc, METH_VARARGS}, {NULL, NULL} /* Sentinel */ diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -80,6 +80,54 @@ /* else: cannot report the error */ } +static PyObject *b_getwinerror(PyObject *self, PyObject *args) +{ + int err = -1; + int len; + char *s; + char *s_buf = NULL; /* Free via LocalFree */ + char s_small_buf[28]; /* Room for "Windows Error 0xFFFFFFFF" */ + PyObject *v; + + if (!PyArg_ParseTuple(args, "|i", &err)) + return NULL; + + if (err == -1) { + struct cffi_errno_s *p; + p = _geterrno_object(); + if (p == NULL) + return PyErr_NoMemory(); + err = p->saved_lasterror; + } + + len = FormatMessage( + /* Error API error */ + FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, /* no message source */ + err, + MAKELANGID(LANG_NEUTRAL, + SUBLANG_DEFAULT), /* Default language */ + (LPTSTR) &s_buf, + 0, /* size not used */ + NULL); /* no args */ + if (len==0) { + /* Only seen this in out of mem situations */ + sprintf(s_small_buf, "Windows Error 0x%X", err); + s = s_small_buf; + s_buf = NULL; + } else { + s = s_buf; + /* remove trailing cr/lf and dots */ + while (len > 0 && (s[len-1] <= ' ' || s[len-1] == '.')) + s[--len] = '\0'; + } + v = Py_BuildValue("(is)", err, s); + LocalFree(s_buf); + return v; +} + /************************************************************/ /* Emulate dlopen()&co. 
from the Windows API */ diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2698,6 +2698,16 @@ # res = GetLastError() assert res == 42 + # + SetLastError(2) + code, message = getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + # + code, message = getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") def test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -347,6 +347,9 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model with self._lock: diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1122,9 +1122,18 @@ ``ffi.errno``: the value of ``errno`` received from the most recent C call in this thread, and passed to the following C call, is available via -reads and writes of the property ``ffi.errno``. On Windows we also save -and restore the ``GetLastError()`` value, but to access it you need to -declare and call the ``GetLastError()`` function as usual. +reads and writes of the property ``ffi.errno``. + +``ffi.getwinerror(code=-1)``: on Windows, in addition to ``errno`` we +also save and restore the ``GetLastError()`` value across function +calls. This function returns this error code as a tuple ``(code, +message)``, adding a readable message like Python does when raising +WindowsError. If the argument ``code`` is given, format that code into +a message instead of using ``GetLastError()``. *New in version 0.8.* +(Note that it is also possible to declare and call the ``GetLastError()`` +function as usual.) + +.. 
"versionadded:: 0.8" --- inlined in the previous paragraph ``ffi.string(cdata, [maxlen])``: return a Python string (or unicode string) from the 'cdata'. *New in version 0.3.* diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -194,3 +194,20 @@ assert p.a[0] == 200 assert p.a[1] == 300 assert p.a[2] == 400 + + @pytest.mark.skipif("sys.platform != 'win32'") + def test_getwinerror(self): + ffi = FFI() + code, message = ffi.getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") + ffi.cdef("void SetLastError(int);") + lib = ffi.dlopen("Kernel32.dll") + lib.SetLastError(2) + code, message = ffi.getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + code, message = ffi.getwinerror(-1) + assert code == 2 + assert message == "The system cannot find the file specified" From noreply at buildbot.pypy.org Tue Nov 12 13:56:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 13:56:17 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Like CPython, strip the message from trailing dots. Message-ID: <20131112125617.2DFD71C0F88@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67973:7ce0a6e10446 Date: 2013-11-12 13:54 +0100 http://bitbucket.org/pypy/pypy/changeset/7ce0a6e10446/ Log: Like CPython, strip the message from trailing dots. diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -216,7 +216,7 @@ def llimpl_FormatError(code): "Return a message corresponding to the given Windows error code." 
buf = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') - + buf[0] = lltype.nullptr(rffi.CCHARP.TO) try: msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, @@ -225,17 +225,20 @@ DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) + buflen = intmask(msglen) - if msglen <= 2: # includes the case msglen < 0 - return fake_FormatError(code) + # remove trailing cr/lf and dots + s_buf = buf[0] + while buflen > 0 and (s_buf[buflen - 1] <= ' ' or + s_buf[buflen - 1] == '.'): + buflen -= 1 - # FormatMessage always appends \r\n. - buflen = intmask(msglen - 2) - assert buflen > 0 - - result = rffi.charpsize2str(buf[0], buflen) + if buflen <= 0: + result = fake_FormatError(code) + else: + result = rffi.charpsize2str(s_buf, buflen) + finally: LocalFree(rffi.cast(rffi.VOIDP, buf[0])) - finally: lltype.free(buf, flavor='raw') return result From noreply at buildbot.pypy.org Tue Nov 12 13:56:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 13:56:18 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Update to cffi/8a16eff7850c. Message-ID: <20131112125618.785B01C0F88@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67974:662e4114634e Date: 2013-11-12 13:55 +0100 http://bitbucket.org/pypy/pypy/changeset/662e4114634e/ Log: Update to cffi/8a16eff7850c. 
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload @@ -43,6 +44,8 @@ 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name } + if sys.platform == 'win32': + interpleveldefs['getwinerror'] = 'cerrno.getwinerror' for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL", "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]: diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py --- a/pypy/module/_cffi_backend/cerrno.py +++ b/pypy/module/_cffi_backend/cerrno.py @@ -39,3 +39,14 @@ def set_errno(space, errno): ec = get_errno_container(space) ec._cffi_saved_errno = errno + +# ____________________________________________________________ + + at unwrap_spec(code=int) +def getwinerror(space, code=-1): + from rpython.rlib.rwin32 import FormatError + if code == -1: + ec = get_errno_container(space) + code = ec._cffi_saved_LastError + message = FormatError(code) + return space.newtuple([space.wrap(code), space.wrap(message)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2687,6 +2687,16 @@ # res = GetLastError() assert res == 42 + # + SetLastError(2) + code, message = getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + # + code, message = getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") def test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', From 
noreply at buildbot.pypy.org Tue Nov 12 13:57:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 13:57:11 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Import cffi/8a16eff7850c here too Message-ID: <20131112125711.C24411C0F88@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67975:edcc6d30feff Date: 2013-11-12 13:56 +0100 http://bitbucket.org/pypy/pypy/changeset/edcc6d30feff/ Log: Import cffi/8a16eff7850c here too diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -347,6 +347,9 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model with self._lock: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -195,3 +195,20 @@ assert p.a[0] == 200 assert p.a[1] == 300 assert p.a[2] == 400 + + @pytest.mark.skipif("sys.platform != 'win32'") + def test_getwinerror(self): + ffi = FFI() + code, message = ffi.getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") + ffi.cdef("void SetLastError(int);") + lib = ffi.dlopen("Kernel32.dll") + lib.SetLastError(2) + code, message = ffi.getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + code, message = ffi.getwinerror(-1) + assert code == 2 + assert message == "The system cannot find the file specified" From noreply at buildbot.pypy.org Tue Nov 12 13:59:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 13:59:34 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Use getwinerror() here 
Message-ID: <20131112125934.57BD71C140E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67976:7341987b8055 Date: 2013-11-12 13:58 +0100 http://bitbucket.org/pypy/pypy/changeset/7341987b8055/ Log: Use getwinerror() here diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py --- a/pypy/module/posix/app_startfile.py +++ b/pypy/module/posix/app_startfile.py @@ -11,9 +11,9 @@ """) self.NULL = ffi.NULL self.cast = ffi.cast - self.libK = ffi.dlopen("Kernel32.dll") - self.libS = ffi.dlopen("Shell32.dll") + self.lib = ffi.dlopen("Shell32.dll") self.SW_SHOWNORMAL = 1 + self.getwinerror = ffi.getwinerror _cffi_wrapper = None @@ -29,21 +29,16 @@ if isinstance(filepath, str): if isinstance(operation, unicode): operation = operation.encode("ascii") - rc = w.libS.ShellExecuteA(w.NULL, operation, filepath, - w.NULL, w.NULL, w.SW_SHOWNORMAL) + rc = w.lib.ShellExecuteA(w.NULL, operation, filepath, + w.NULL, w.NULL, w.SW_SHOWNORMAL) elif isinstance(filepath, unicode): if isinstance(operation, str): operation = operation.decode("ascii") - rc = w.libS.ShellExecuteW(w.NULL, operation, filepath, - w.NULL, w.NULL, w.SW_SHOWNORMAL) + rc = w.lib.ShellExecuteW(w.NULL, operation, filepath, + w.NULL, w.NULL, w.SW_SHOWNORMAL) else: raise TypeError("argument 1 must be str or unicode") rc = int(w.cast("uintptr_t", rc)) if rc <= 32: - code = w.libK.GetLastError() - try: - import _rawffi - msg = _rawffi.FormatError(code) - except ImportError: - msg = 'Error %s' % code + code, msg = w.getwinerror() raise WindowsError(code, msg, filepath) From noreply at buildbot.pypy.org Tue Nov 12 14:18:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:18:05 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Python 2.6 compatibility Message-ID: <20131112131805.DE28A1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67977:3e1a49627e13 Date: 2013-11-12 14:17 +0100 
http://bitbucket.org/pypy/pypy/changeset/3e1a49627e13/ Log: Python 2.6 compatibility diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -5,7 +5,6 @@ from __future__ import absolute_import import sys, types, inspect, weakref -from collections import OrderedDict from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, @@ -371,7 +370,7 @@ for e in x: listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) - elif tp is dict or tp is r_dict or tp is OrderedDict: + elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: if need_const: key = Constant(x) try: @@ -413,7 +412,7 @@ dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is OrderedDict: + if tp is SomeOrderedDict.knowntype: result = SomeOrderedDict(dictdef) else: result = SomeDict(dictdef) diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -2,7 +2,6 @@ Built-in functions. 
""" import sys -from collections import OrderedDict from rpython.annotator.model import ( SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, @@ -364,7 +363,7 @@ BUILTIN_ANALYZERS[rpython.rlib.objectmodel.instantiate] = robjmodel_instantiate BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_dict] = robjmodel_r_dict BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_ordereddict] = robjmodel_r_ordereddict -BUILTIN_ANALYZERS[OrderedDict] = lambda : SomeOrderedDict(getbookkeeper().getdictdef()) +BUILTIN_ANALYZERS[SomeOrderedDict.knowntype] = lambda : SomeOrderedDict(getbookkeeper().getdictdef()) BUILTIN_ANALYZERS[rpython.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke BUILTIN_ANALYZERS[rpython.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr] = llmemory_cast_ptr_to_adr diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -32,7 +32,6 @@ import inspect import weakref from types import BuiltinFunctionType, MethodType -from collections import OrderedDict import rpython from rpython.tool import descriptor @@ -357,7 +356,11 @@ return '{...%s...}' % (len(const),) class SomeOrderedDict(SomeDict): - knowntype = OrderedDict + try: + from collections import OrderedDict as knowntype + except ImportError: # Python 2.6 + class PseudoOrderedDict(dict): pass + knowntype = PseudoOrderedDict def method_copy(dct): return SomeOrderedDict(dct.dictdef) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -1,4 +1,3 @@ -from collections import OrderedDict from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant @@ -750,7 +749,7 @@ BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict 
-BUILTIN_TYPER[OrderedDict] = rtype_ordered_dict +BUILTIN_TYPER[annmodel.SomeOrderedDict.knowntype] = rtype_ordered_dict BUILTIN_TYPER[objectmodel.r_ordereddict] = rtype_ordered_dict # _________________________________________________________________ diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -1,6 +1,9 @@ import py -from collections import OrderedDict +try: + from collections import OrderedDict +except ImportError: # Python 2.6 + py.test.skip("requires collections.OrderedDict") from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask From noreply at buildbot.pypy.org Tue Nov 12 14:24:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:24:30 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Add an app-level-only test for os.startfile() Message-ID: <20131112132430.2EBED1C0206@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67978:2cec7296d7fb Date: 2013-11-12 14:23 +0100 http://bitbucket.org/pypy/pypy/changeset/2cec7296d7fb/ Log: Add an app-level-only test for os.startfile() diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -52,6 +52,7 @@ def setup_class(cls): cls.space = space + cls.w_runappdirect = space.wrap(cls.runappdirect) cls.w_posix = space.appexec([], GET_POSIX) cls.w_path = space.wrap(str(path)) cls.w_path2 = space.wrap(str(path2)) @@ -1108,6 +1109,28 @@ assert False, "urandom() always returns the same string" # Or very unlucky + if hasattr(os, 'startfile'): + def test_startfile(self): + if not self.runappdirect: + skip("should not try to import cffi at app-level") + startfile = self.posix.startfile + for t1 in [str, 
unicode]: + for t2 in [str, unicode]: + e = raises(WindowsError, startfile, t1("\\"), t2("close")) + assert e.value.args[0] == 1155 + assert e.value.args[1] == ( + "No application is associated with the " + "specified file for this operation") + if len(e.value.args) > 2: + assert e.value.args[2] == t1("\\") + # + e = raises(WindowsError, startfile, "\\foo\\bar\\baz") + assert e.value.args[0] == 2 + assert e.value.args[1] == ( + "The system cannot find the file specified") + if len(e.value.args) > 2: + assert e.value.args[2] == "\\foo\\bar\\baz" + class AppTestEnvironment(object): def setup_class(cls): From noreply at buildbot.pypy.org Tue Nov 12 14:26:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:26:34 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Progress Message-ID: <20131112132634.47BB61C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67979:49bfc678ad16 Date: 2013-11-12 14:25 +0100 http://bitbucket.org/pypy/pypy/changeset/49bfc678ad16/ Log: Progress diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -1,9 +1,87 @@ ======================================= -PyPy 2.2 - xxx +PyPy 2.2 - Incrementalism ======================================= -GC! +We're pleased to announce PyPy 2.2, which targets version 2.7.3 of the Python +language. This release main highlight is the introduction of the incremental +garbage collector, sponsored by the `Raspberry Pi Foundation`_. +This release also contains several bugfixes and performance improvements. -numpypy module was removed in favor of an external numpy fork +You can download the PyPy 2.2 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. 
+Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost all pauses due + to a major collection taking place. Previously, it would pause the program (rarely) + to walk all live objects, which could take arbitrarily long if your process is using + a whole lot of RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from the GC and the JIT, + but they should be on the order of 5 milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a process running + for long enough would eventually JIT-compile more and more rarely executed code. + Not only is it useless to compile such code, but as more compiled code means more + memory used, this gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called ``_numpypy``. 
+ The ``numpy`` module itself has been moved to ``https://bitbucket.org/pypy/numpy``. + You need to install it separately in a virtualenv with + ``pip install git+https://bitbucket.org/pypy/numpy.git``. + +* improvements to non-inlined calls + +* sys.set_trace is now JITted (think coverage) + +* faster json + +* improvements in buffer copying + +* tk is supported (XXX was already in pypy 2.1 it seems?? maybe not correctly packaged?) + +* We finally wrote all the missing ``os.xxx()`` functions. There are a lot of strange + ones that nobody ever heard about, except those who really need them. + +* numpy C API + + + the core module is included in PyPy 2.2, but you must now install + an external fork of numpy from https://bitbucket.org/pypy/numpy + + +removed in favor of an external numpy fork at https://bitbucket.org/pypy/numpy From noreply at buildbot.pypy.org Tue Nov 12 14:30:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:30:27 +0100 (CET) Subject: [pypy-commit] cffi default: Ah bah Message-ID: <20131112133027.568891C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1413:71a521ed3573 Date: 2013-11-12 14:30 +0100 http://bitbucket.org/cffi/cffi/changeset/71a521ed3573/ Log: Ah bah diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -711,7 +711,7 @@ "#define BAZ ...\n") lib = ffi.verify("#define FOO 42\n" "#define BAR (-44)\n" - "#define BAZ 0xffffffffffffffffLL\n") + "#define BAZ 0xffffffffffffffffULL\n") assert lib.FOO == 42 assert lib.BAR == -44 assert lib.BAZ == 0xffffffffffffffff From noreply at buildbot.pypy.org Tue Nov 12 14:31:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:31:45 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Import cffi/71a521ed3573 Message-ID: <20131112133145.310891C0206@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67980:729af32ca599 Date: 2013-11-12 
14:31 +0100 http://bitbucket.org/pypy/pypy/changeset/729af32ca599/ Log: Import cffi/71a521ed3573 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -712,7 +712,7 @@ "#define BAZ ...\n") lib = ffi.verify("#define FOO 42\n" "#define BAR (-44)\n" - "#define BAZ 0xffffffffffffffffLL\n") + "#define BAZ 0xffffffffffffffffULL\n") assert lib.FOO == 42 assert lib.BAR == -44 assert lib.BAZ == 0xffffffffffffffff From noreply at buildbot.pypy.org Tue Nov 12 14:34:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:34:24 +0100 (CET) Subject: [pypy-commit] cffi default: Skip half the test with MSVC Message-ID: <20131112133424.1797C1C0206@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1414:8c9468c7c428 Date: 2013-11-12 14:33 +0100 http://bitbucket.org/cffi/cffi/changeset/8c9468c7c428/ Log: Skip half the test with MSVC diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1601,6 +1601,8 @@ (maxulong, -1, ''), (-1, 0xffffffff, 'U'), (-1, maxulong, 'UL')]: + if c2c and sys.platform == 'win32': + continue # enums may always be signed with MSVC ffi = FFI() ffi.cdef("enum foo_e { AA=%s };" % c1) e = py.test.raises(VerificationError, ffi.verify, From noreply at buildbot.pypy.org Tue Nov 12 14:35:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 14:35:49 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Import cffi/8c9468c7c428 Message-ID: <20131112133549.8E4481C0206@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r67981:f3423a62cd09 Date: 2013-11-12 14:33 +0100 http://bitbucket.org/pypy/pypy/changeset/f3423a62cd09/ Log: Import cffi/8c9468c7c428 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py 
b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1602,6 +1602,8 @@ (maxulong, -1, ''), (-1, 0xffffffff, 'U'), (-1, maxulong, 'UL')]: + if c2c and sys.platform == 'win32': + continue # enums may always be signed with MSVC ffi = FFI() ffi.cdef("enum foo_e { AA=%s };" % c1) e = py.test.raises(VerificationError, ffi.verify, From noreply at buildbot.pypy.org Tue Nov 12 15:01:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 15:01:01 +0100 (CET) Subject: [pypy-commit] cffi default: Carefully write the Python 3 version of getwinerror(). I have no way to Message-ID: <20131112140101.D66051C12CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1415:3f83df0a79e3 Date: 2013-11-12 15:00 +0100 http://bitbucket.org/cffi/cffi/changeset/3f83df0a79e3/ Log: Carefully write the Python 3 version of getwinerror(). I have no way to know if I did a typo there. 
diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -80,6 +80,54 @@ /* else: cannot report the error */ } +#if PY_MAJOR_VERSION >= 3 +static PyObject *b_getwinerror(PyObject *self, PyObject *args) +{ + int err = -1; + int len; + WCHAR *s_buf = NULL; /* Free via LocalFree */ + PyObject *v, *message; + + if (!PyArg_ParseTuple(args, "|i", &err)) + return NULL; + + if (err == -1) { + struct cffi_errno_s *p; + p = _geterrno_object(); + if (p == NULL) + return PyErr_NoMemory(); + err = p->saved_lasterror; + } + + len = FormatMessageW( + /* Error API error */ + FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, /* no message source */ + err, + MAKELANGID(LANG_NEUTRAL, + SUBLANG_DEFAULT), /* Default language */ + (LPWSTR) &s_buf, + 0, /* size not used */ + NULL); /* no args */ + if (len==0) { + /* Only seen this in out of mem situations */ + message = PyUnicode_FromFormat("Windows Error 0x%X", err); + } else { + /* remove trailing cr/lf and dots */ + while (len > 0 && (s_buf[len-1] <= L' ' || s_buf[len-1] == L'.')) + s_buf[--len] = L'\0'; + message = PyUnicode_FromWideChar(s_buf, len); + } + if (message != NULL) + v = Py_BuildValue("(iO)", err, message); + else + v = NULL; + LocalFree(s_buf); + return v; +} +#else static PyObject *b_getwinerror(PyObject *self, PyObject *args) { int err = -1; @@ -127,6 +175,7 @@ LocalFree(s_buf); return v; } +#endif /************************************************************/ /* Emulate dlopen()&co. from the Windows API */ From noreply at buildbot.pypy.org Tue Nov 12 15:19:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 15:19:17 +0100 (CET) Subject: [pypy-commit] pypy default: Don't crash if no user group is called 'root'. 
Message-ID: <20131112141917.629EC1C147B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67982:c151e8359d3a Date: 2013-11-12 15:18 +0100 http://bitbucket.org/pypy/pypy/changeset/c151e8359d3a/ Log: Don't crash if no user group is called 'root'. diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -10,7 +10,11 @@ "No grp module on this platform") def test_basic(self): - g = self.grp.getgrnam("root") + raises(KeyError, self.grp.getgrnam, "dEkLofcG") + try: + g = self.grp.getgrnam("root") + except KeyError: + return # no 'root' group on OS/X? assert g.gr_gid == 0 assert g.gr_mem == ['root'] or g.gr_mem == [] assert g.gr_name == 'root' From noreply at buildbot.pypy.org Tue Nov 12 16:02:34 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 12 Nov 2013 16:02:34 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: fix for threadlocal access. It was missing barriers and push_roots. Message-ID: <20131112150234.A67E21C036B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r67983:73cb960f51c7 Date: 2013-11-12 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/73cb960f51c7/ Log: fix for threadlocal access. It was missing barriers and push_roots. diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,9 @@ +------------------------------------------------------------ + +should stm_thread_local_obj always be read & writeable? would +a write-barrier in begin_transaction be too much for small +transactions? should we handle it specially (undolog?) 
+ ------------------------------------------------------------ looking at trace of fibo.tlc (targettlc.py), there are a lot diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -104,6 +104,7 @@ gct_stm_perform_transaction = _gct_with_roots_pushed gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed gct_stm_inspect_abort_info = _gct_with_roots_pushed + gct_stm_threadlocalref_set = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -440,7 +440,8 @@ 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), 'stm_threadlocalref_get': LLOp(sideeffects=False), - 'stm_threadlocalref_set': LLOp(), + 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, + # see threadlocalref.py 'stm_threadlocal_get': LLOp(sideeffects=False), 'stm_threadlocal_set': LLOp(), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -53,7 +53,14 @@ def stm_barrier(funcgen, op): category_change = op.args[0].value - frm, middle, to = category_change + # XXX: how to unify the stm_barrier llop generation in + # writebarrier.py and threadlocalref.py? 
+ if isinstance(category_change, str): + frm, middle, to = category_change + else: # rstr + frm, middle, to = (category_change.chars[0], + category_change.chars[1], + category_change.chars[2]) assert middle == '2' assert frm < to if to == 'W': diff --git a/rpython/translator/stm/threadlocalref.py b/rpython/translator/stm/threadlocalref.py --- a/rpython/translator/stm/threadlocalref.py +++ b/rpython/translator/stm/threadlocalref.py @@ -28,13 +28,19 @@ if not array: return lltype.nullptr(rclass.OBJECTPTR.TO) else: + array = llop.stm_barrier(lltype.Ptr(ARRAY), 'A2R', array) return array[index] # def ll_threadlocalref_set(index, newvalue): array = llop.stm_threadlocal_get(lltype.Ptr(ARRAY)) if not array: - array = lltype.malloc(ARRAY, total) + array = lltype.malloc(ARRAY, total) # llop may allocate! llop.stm_threadlocal_set(lltype.Void, array) + else: + array = llop.stm_barrier(lltype.Ptr(ARRAY), 'A2W', array) + # invalidating other barriers after an llop.threadlocalref_set + # is not necessary since no other variable should contain + # a reference to stm_threadlocal_obj array[index] = newvalue # annhelper = annlowlevel.MixLevelHelperAnnotator(t.rtyper) From noreply at buildbot.pypy.org Tue Nov 12 16:13:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 16:13:27 +0100 (CET) Subject: [pypy-commit] cffi default: Test various combinations of calls. Mostly a libffi stress-test. Message-ID: <20131112151327.4A48D1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1416:be9745bbc509 Date: 2013-11-12 16:12 +0100 http://bitbucket.org/cffi/cffi/changeset/be9745bbc509/ Log: Test various combinations of calls. Mostly a libffi stress-test. 
diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1767,3 +1767,87 @@ ffi.cdef("""const int a[];""") lib = ffi.verify("""const int a[5];""") assert repr(ffi.typeof(lib.a)) == "" + +def _test_various_calls(force_libffi): + cdef_source = """ + int xvalue; + long long ivalue, rvalue; + float fvalue; + double dvalue; + long double Dvalue; + signed char tf_bb(signed char x, signed char c); + unsigned char tf_bB(signed char x, unsigned char c); + short tf_bh(signed char x, short c); + unsigned short tf_bH(signed char x, unsigned short c); + int tf_bi(signed char x, int c); + unsigned int tf_bI(signed char x, unsigned int c); + long tf_bl(signed char x, long c); + unsigned long tf_bL(signed char x, unsigned long c); + long long tf_bq(signed char x, long long c); + float tf_bf(signed char x, float c); + double tf_bd(signed char x, double c); + long double tf_bD(signed char x, long double c); + """ + if force_libffi: + cdef_source = (cdef_source + .replace('tf_', '(*const tf_') + .replace('(signed char x', ')(signed char x')) + ffi = FFI() + ffi.cdef(cdef_source) + lib = ffi.verify(""" + int xvalue; + long long ivalue, rvalue; + float fvalue; + double dvalue; + long double Dvalue; + + #define S(letter) xvalue = x; letter##value = c; return rvalue; + + signed char tf_bb(signed char x, signed char c) { S(i) } + unsigned char tf_bB(signed char x, unsigned char c) { S(i) } + short tf_bh(signed char x, short c) { S(i) } + unsigned short tf_bH(signed char x, unsigned short c) { S(i) } + int tf_bi(signed char x, int c) { S(i) } + unsigned int tf_bI(signed char x, unsigned int c) { S(i) } + long tf_bl(signed char x, long c) { S(i) } + unsigned long tf_bL(signed char x, unsigned long c) { S(i) } + long long tf_bq(signed char x, long long c) { S(i) } + float tf_bf(signed char x, float c) { S(f) } + double tf_bd(signed char x, double c) { S(d) } + long double tf_bD(signed char x, long double c) { S(D) } + """) + 
lib.rvalue = 0x7182838485868788 + for kind, cname in [('b', 'signed char'), + ('B', 'unsigned char'), + ('h', 'short'), + ('H', 'unsigned short'), + ('i', 'int'), + ('I', 'unsigned int'), + ('l', 'long'), + ('L', 'unsigned long'), + ('q', 'long long'), + ('f', 'float'), + ('d', 'double'), + ('D', 'long double')]: + sign = +1 if 'unsigned' in cname else -1 + lib.xvalue = 0 + lib.ivalue = 0 + lib.fvalue = 0 + lib.dvalue = 0 + lib.Dvalue = 0 + fun = getattr(lib, 'tf_b' + kind) + res = fun(-42, sign * 99) + if kind == 'D': + res = float(res) + assert res == int(ffi.cast(cname, 0x7182838485868788)) + assert lib.xvalue == -42 + if kind in 'fdD': + assert float(getattr(lib, kind + 'value')) == -99.0 + else: + assert lib.ivalue == sign * 99 + +def test_various_calls_direct(): + _test_various_calls(force_libffi=False) + +def test_various_calls_libffi(): + _test_various_calls(force_libffi=True) From noreply at buildbot.pypy.org Tue Nov 12 18:29:27 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 18:29:27 +0100 (CET) Subject: [pypy-commit] buildbot default: allow the BUILDJITLINUXARMHF_RARING builder to be forced Message-ID: <20131112172927.908D21C0F88@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r889:2c32efb0fb5d Date: 2013-11-12 18:26 +0100 http://bitbucket.org/pypy/buildbot/changeset/2c32efb0fb5d/ Log: allow the BUILDJITLINUXARMHF_RARING builder to be forced diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -136,6 +136,7 @@ BUILDJITLINUXARM, BUILDLINUXARMHF_RASPBIAN, BUILDJITLINUXARMHF_RASPBIAN, + BUILDJITLINUXARMHF_RARING, ] schedulers = [ From noreply at buildbot.pypy.org Tue Nov 12 19:13:38 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 19:13:38 +0100 (CET) Subject: [pypy-commit] pypy default: fix this test a bit more strictly Message-ID: 
<20131112181338.23D221C0206@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67984:b1014f8d559c Date: 2013-11-12 10:12 -0800 http://bitbucket.org/pypy/pypy/changeset/b1014f8d559c/ Log: fix this test a bit more strictly diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -11,14 +11,18 @@ def test_basic(self): raises(KeyError, self.grp.getgrnam, "dEkLofcG") - try: - g = self.grp.getgrnam("root") - except KeyError: - return # no 'root' group on OS/X? - assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] - assert g.gr_name == 'root' - assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + for name in ["root", "wheel"]: + try: + g = self.grp.getgrnam(name) + except KeyError: + continue + assert g.gr_gid == 0 + assert g.gr_mem == ['root'] or g.gr_mem == [] + assert g.gr_name == name + assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + break + else: + raise def test_extra(self): grp = self.grp From noreply at buildbot.pypy.org Tue Nov 12 19:19:58 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 19:19:58 +0100 (CET) Subject: [pypy-commit] pypy default: update usemodules so it doesn't skip untranslated Message-ID: <20131112181958.7C3AA1C036B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67985:9c9132e23801 Date: 2013-11-12 13:19 -0500 http://bitbucket.org/pypy/pypy/changeset/9c9132e23801/ Log: update usemodules so it doesn't skip untranslated diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -2,8 +2,7 @@ class AppTestGrp: - - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('binascii', '_ffi', 
'_rawffi', 'itertools')) def setup_class(cls): cls.w_grp = import_lib_pypy(cls.space, 'grp', From noreply at buildbot.pypy.org Tue Nov 12 19:23:09 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 19:23:09 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: enable singlefloats on ARMHF Message-ID: <20131112182309.9FAA01C036B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r67986:063d0dcda94f Date: 2013-11-12 09:46 -0600 http://bitbucket.org/pypy/pypy/changeset/063d0dcda94f/ Log: enable singlefloats on ARMHF diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -22,7 +22,7 @@ supports_floats = True supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode - supports_singlefloats = not detect_hardfloat() + supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) From noreply at buildbot.pypy.org Tue Nov 12 19:23:11 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 19:23:11 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: unused Message-ID: <20131112182311.38EEC1C036B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r67987:41afcae95cfd Date: 2013-11-12 09:49 -0600 http://bitbucket.org/pypy/pypy/changeset/41afcae95cfd/ Log: unused diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -55,10 +55,6 @@ type = FLOAT width = 2 * WORD - def get_single_precision_regs(self): - return [VFPRegisterLocation(i) for i in - [self.value * 2, self.value * 2 + 1]] - def __repr__(self): return 'vfp%d' % self.value From noreply at buildbot.pypy.org Tue Nov 12 19:23:12 2013 From: noreply at 
buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 19:23:12 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: add single precision VFP register locations Message-ID: <20131112182312.973151C036B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r67988:93c961fe1b79 Date: 2013-11-12 09:52 -0600 http://bitbucket.org/pypy/pypy/changeset/93c961fe1b79/ Log: add single precision VFP register locations diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -56,7 +56,7 @@ width = 2 * WORD def __repr__(self): - return 'vfp%d' % self.value + return 'vfp(d%d)' % self.value def is_core_reg(self): return False @@ -70,6 +70,14 @@ def is_float(self): return True +class SVFPRegisterLocation(VFPRegisterLocation): + """Single Precision VFP Register""" + _immutable_ = True + width = WORD + type = 'S' + + def __repr__(self): + return 'vfp(s%d)' % self.value class ImmLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -1,8 +1,10 @@ from rpython.jit.backend.arm.locations import VFPRegisterLocation +from rpython.jit.backend.arm.locations import SVFPRegisterLocation from rpython.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] +svfpregisters = [SVFPRegisterLocation(i) for i in range(32)] [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15] = registers @@ -10,6 +12,10 @@ [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters +# single precision VFP registers, 32-bit +for i in range(32): + globals()['s%d' % i] = svfpregisters[i] + # aliases for registers fp = r11 ip = r12 @@ -27,6 +33,7 @@ 
callee_restored_registers = callee_resp + [pc] vfp_argument_regs = caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] +svfp_argument_regs = [globals()['s%i' % i] for i in range(16)] callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp From noreply at buildbot.pypy.org Tue Nov 12 19:23:14 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 19:23:14 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: add operations to move between core and single precision VFP registers Message-ID: <20131112182314.052781C0F88@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r67989:3c95419f55a7 Date: 2013-11-12 12:06 -0600 http://bitbucket.org/pypy/pypy/changeset/3c95419f55a7/ Log: add operations to move between core and single precision VFP registers diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -178,6 +178,30 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_sc(self, dest, src): + """move a single precision vfp register[src] to a core reg[dest]""" + self._VMOV_32bit(src, dest, to_arm_register=1) + + def VMOV_cs(self, dest, src): + """move a core register[src] to a single precision vfp + register[dest]""" + self._VMOV_32bit(dest, src, to_arm_register=0) + + def _VMOV_32bit(self, float_reg, core_reg, to_arm_register, cond=cond.AL): + """This instruction transfers the contents of a single-precision VFP + register to an ARM core register, or the contents of an ARM core + register to a single-precision VFP register. 
+ """ + instr = (cond << 28 + | 0xE << 24 + | to_arm_register << 20 + | ((float_reg >> 1) & 0xF) << 16 + | core_reg << 12 + | 0xA << 8 + | (float_reg & 0x1) << 7 + | 1 << 4) + self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): sz = 1 # for 64-bit mode instr = (cond << 28 From noreply at buildbot.pypy.org Tue Nov 12 19:23:15 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 19:23:15 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: handle singlefloats in callbuilder Message-ID: <20131112182315.76D6E1C036B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r67990:7bc08fc09a9e Date: 2013-11-12 12:07 -0600 http://bitbucket.org/pypy/pypy/changeset/7bc08fc09a9e/ Log: handle singlefloats in callbuilder diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): non_float_locs = [] non_float_regs = [] float_locs = [] float_regs = [] stack_args = [] + singlefloats = None arglocs = self.arglocs argtypes = self.argtypes count = 0 # stack alignment counter on_stack = 0 - for arg 
in arglocs: - if arg.type != FLOAT: + for i in range(len(arglocs)): + argtype = INT + if i < len(argtypes) and argtypes[i] == 'S': + argtype = argtypes[i] + arg = arglocs[i] + if arg.is_float(): + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + else: # float argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': + # Singlefloat argument + if singlefloats is None: + singlefloats = [] + tgt = self.get_next_vfp(argtype) + if tgt: + singlefloats.append((arg, tgt)) + else: # Singlefloat argument that needs to go on the stack + # treated the same as a regular core register argument + count += 1 + on_stack += 1 + stack_args.append(arg) + else: if len(non_float_regs) < len(r.argument_regs): reg = r.argument_regs[len(non_float_regs)] non_float_locs.append(arg) @@ -249,18 +310,6 @@ count += 1 on_stack += 1 stack_args.append(arg) - else: - if len(float_regs) < len(r.vfp_argument_regs): - reg = r.vfp_argument_regs[len(float_regs)] - float_locs.append(arg) - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 # align the stack if count % 2 != 0: stack_args.append(None) @@ -275,13 +324,28 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers + remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) + if singlefloats: + for src, dest in singlefloats: + if src.is_float(): + assert 0, 'unsupported case' + if src.is_stack(): + # use special VLDR for 32bit + self.asm.regalloc_mov(src, r.ip) + src = r.ip + if src.is_imm(): + self.mc.gen_load_int(r.ip.value, src.value) + src = r.ip + if src.is_core_reg(): 
+ self.mc.VMOV_cs(dest.value, src.value) # remap values stored in core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) - # remap values stored in vfp registers - remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) def load_result(self): resloc = self.resloc + if self.restype == 'S': + self.mc.VMOV_sc(resloc.value, r.s0.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, From noreply at buildbot.pypy.org Tue Nov 12 21:32:32 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 21:32:32 +0100 (CET) Subject: [pypy-commit] pypy default: clean up some numpypy references Message-ID: <20131112203232.D25BE1C13FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67991:e85c7e78f011 Date: 2013-11-12 15:25 -0500 http://bitbucket.org/pypy/pypy/changeset/e85c7e78f011/ Log: clean up some numpypy references diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -401,7 +401,7 @@ raise operationerrfmt(space.w_TypeError, msg, w_dtype) W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -338,11 +338,11 @@ Returns an array containing the same data with a new shape. - Refer to `numpypy.reshape` for full documentation. + Refer to `numpy.reshape` for full documentation. 
See Also -------- - numpypy.reshape : equivalent function + numpy.reshape : equivalent function """ args_w, kw_w = __args__.unpack() order = NPY_CORDER @@ -1123,9 +1123,8 @@ return res """, filename=__file__).interphook('ptp') -W_NDimArray.typedef = TypeDef( - "ndarray", - __module__ = "numpypy", +W_NDimArray.typedef = TypeDef("ndarray", + __module__ = "numpy", __new__ = interp2app(descr_new_array), __len__ = interp2app(W_NDimArray.descr_len), @@ -1391,8 +1390,8 @@ return box -W_FlatIterator.typedef = TypeDef( - 'flatiter', +W_FlatIterator.typedef = TypeDef("flatiter", + __module__ = "numpy", __iter__ = interp2app(W_FlatIterator.descr_iter), __getitem__ = interp2app(W_FlatIterator.descr_getitem), __setitem__ = interp2app(W_FlatIterator.descr_setitem), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -413,7 +413,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpypy", + __module__ = "numpy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -26,6 +26,7 @@ assert d.kind == 'b' assert dtype(d) is d assert dtype('bool') is d + assert repr(type(d)) == "" assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -249,9 +249,11 @@ return CustomIntObject(value) def test_ndarray(self): - from numpypy import ndarray, array, dtype + from numpy import ndarray, array, dtype, flatiter assert type(ndarray) is type + assert repr(ndarray) == "" + assert repr(flatiter) == "" assert 
type(array) is not type a = ndarray((2, 3)) assert a.shape == (2, 3) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -81,7 +81,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" or repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin From noreply at buildbot.pypy.org Tue Nov 12 21:50:13 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 12 Nov 2013 21:50:13 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: use single precision operations for casts to and from float Message-ID: <20131112205013.C78371C1413@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r67992:d8b1b0c57e72 Date: 2013-11-12 14:41 -0600 http://bitbucket.org/pypy/pypy/changeset/d8b1b0c57e72/ Log: use single precision operations for casts to and from float diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -222,8 +222,16 @@ self._VCVT(target, source, cond, 0, 1) def _VCVT(self, target, source, cond, opc2, sz): - D = 0 - M = 0 + # A8.6.295 + to_integer = (opc2 >> 2) & 1 + if to_integer: + D = target & 1 + target >>= 1 + M = (source >> 4) & 1 + else: + M = source & 1 + source >>= 1 + D = (target >> 4) & 1 op = 1 instr = (cond << 28 | 0xEB8 << 16 @@ -240,8 +248,8 @@ def _VCVT_single_double(self, target, source, cond, sz): # double_to_single = (sz == '1'); - D = 0 - M = 0 + D = target & 1 if sz else (target >> 4) & 1 + M = (source >> 4) & 1 if sz else source & 1 instr = (cond << 28 | 0xEB7 << 16 | 0xAC << 4 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ 
b/rpython/jit/backend/arm/opassembler.py @@ -1102,17 +1102,16 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_float_to_int(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_int_to_float(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond emit_op_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') @@ -1147,15 +1146,14 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_f64_f32(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_f32_f64(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -23,6 +23,7 @@ lr = r14 pc = r15 vfp_ip = d15 +svfp_ip = s31 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] From noreply at buildbot.pypy.org Tue Nov 12 22:43:17 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 22:43:17 +0100 (CET) Subject: [pypy-commit] 
pypy default: add basic ndarray.flags object Message-ID: <20131112214317.81BBB1C0206@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67993:49ca29878cd3 Date: 2013-11-12 16:24 -0500 http://bitbucket.org/pypy/pypy/changeset/49ca29878cd3/ Log: add basic ndarray.flags object diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_flagsobj.py @@ -0,0 +1,45 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.error import OperationError + + +class W_FlagsObject(W_Root): + def __init__(self, arr): + self.arr = arr + + def descr_get_contiguous(self, space): + return space.w_True + + def descr_get_fortran(self, space): + return space.w_False + + def descr_get_writeable(self, space): + return space.w_True + + def descr_getitem(self, space, w_item): + key = space.str_w(w_item) + if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": + return self.descr_get_contiguous(space) + if key == "F" or key == "FORTRAN" or key == "F_CONTIGUOUS": + return self.descr_get_fortran(space) + if key == "W" or key == "WRITEABLE": + return self.descr_get_writeable(space) + raise OperationError(space.w_KeyError, space.wrap( + "Unknown flag")) + + def descr_setitem(self, space, w_item, w_value): + raise OperationError(space.w_KeyError, space.wrap( + "Unknown flag")) + +W_FlagsObject.typedef = TypeDef("flagsobj", + __module__ = "numpy", + __getitem__ = interp2app(W_FlagsObject.descr_getitem), + __setitem__ = interp2app(W_FlagsObject.descr_setitem), + + contiguous = GetSetProperty(W_FlagsObject.descr_get_contiguous), + c_contiguous = GetSetProperty(W_FlagsObject.descr_get_contiguous), + f_contiguous = GetSetProperty(W_FlagsObject.descr_get_fortran), + fortran = GetSetProperty(W_FlagsObject.descr_get_fortran), + writeable = 
GetSetProperty(W_FlagsObject.descr_get_writeable), +) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -9,6 +9,7 @@ from pypy.module.micronumpy.strides import find_shape_and_elems,\ get_shape_from_iterable, to_coords, shape_agreement, \ shape_agreement_multiple +from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.module.micronumpy.interp_flatiter import W_FlatIterator from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop @@ -610,13 +611,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "dumps not implemented yet")) + w_flags = None def descr_get_flags(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - "getting flags not implemented yet")) - - def descr_set_flags(self, space, w_args): - raise OperationError(space.w_NotImplementedError, space.wrap( - "setting flags not implemented yet")) + if self.w_flags is None: + self.w_flags = W_FlagsObject(self) + return self.w_flags @unwrap_spec(offset=int) def descr_getfield(self, space, w_dtype, offset): @@ -1203,6 +1202,7 @@ size = GetSetProperty(W_NDimArray.descr_get_size), itemsize = GetSetProperty(W_NDimArray.descr_get_itemsize), nbytes = GetSetProperty(W_NDimArray.descr_get_nbytes), + flags = GetSetProperty(W_NDimArray.descr_get_flags), fill = interp2app(W_NDimArray.descr_fill), tostring = interp2app(W_NDimArray.descr_tostring), diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -0,0 +1,17 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestFlagsObj(BaseNumpyAppTest): + def test_repr(self): + import numpy as np + a = np.array([1,2,3]) + assert repr(type(a.flags)) == "" + + def 
test_flags(self): + import numpy as np + a = np.array([1,2,3]) + assert a.flags.c_contiguous == True + assert a.flags['W'] == True + raises(KeyError, "a.flags['blah']") + raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") + raises((TypeError, AttributeError), "a.flags.c_contiguous = False") From noreply at buildbot.pypy.org Tue Nov 12 22:43:18 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 12 Nov 2013 22:43:18 +0100 (CET) Subject: [pypy-commit] pypy default: provide flags for scalars also Message-ID: <20131112214318.9FFCC1C0206@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67994:592629fa85f2 Date: 2013-11-12 16:29 -0500 http://bitbucket.org/pypy/pypy/changeset/592629fa85f2/ Log: provide flags for scalars also diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -11,6 +11,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder @@ -113,7 +114,7 @@ class W_GenericBox(W_Root): - _attrs_ = [] + _attrs_ = ['w_flags'] def descr__new__(space, w_subtype, __args__): raise operationerrfmt(space.w_TypeError, @@ -292,6 +293,12 @@ def descr_copy(self, space): return self.convert_to(self.get_dtype(space)) + w_flags = None + def descr_get_flags(self, space): + if self.w_flags is None: + self.w_flags = W_FlagsObject(self) + return self.w_flags + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") @@ -550,6 +557,7 @@ strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = 
GetSetProperty(W_GenericBox.descr_self), + flags = GetSetProperty(W_GenericBox.descr_get_flags), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -7,7 +7,7 @@ a = np.array([1,2,3]) assert repr(type(a.flags)) == "" - def test_flags(self): + def test_array_flags(self): import numpy as np a = np.array([1,2,3]) assert a.flags.c_contiguous == True @@ -15,3 +15,8 @@ raises(KeyError, "a.flags['blah']") raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") raises((TypeError, AttributeError), "a.flags.c_contiguous = False") + + def test_scalar_flags(self): + import numpy as np + a = np.int32(2) + assert a.flags.c_contiguous == True From noreply at buildbot.pypy.org Tue Nov 12 23:44:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Nov 2013 23:44:54 +0100 (CET) Subject: [pypy-commit] pypy default: Write 'ConstPtr(null)' when we know the constant is null. Message-ID: <20131112224454.E79F11C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67995:4d9da705ae8f Date: 2013-11-12 23:44 +0100 http://bitbucket.org/pypy/pypy/changeset/4d9da705ae8f/ Log: Write 'ConstPtr(null)' when we know the constant is null. May break some tests in pypyjit/test_pypy_c. 
diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -103,7 +103,9 @@ elif isinstance(arg, BoxInt): return 'i' + str(mv) elif isinstance(arg, self.ts.ConstRef): - return 'ConstPtr(ptr' + str(mv) + ')' + if arg.value: + return 'ConstPtr(ptr' + str(mv) + ')' + return 'ConstPtr(null)' elif isinstance(arg, self.ts.BoxRef): return 'p' + str(mv) elif isinstance(arg, ConstFloat): From noreply at buildbot.pypy.org Tue Nov 12 23:46:07 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 12 Nov 2013 23:46:07 +0100 (CET) Subject: [pypy-commit] pypy windows-packaging: add dlls for Tkinter Message-ID: <20131112224607.730331C00F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: windows-packaging Changeset: r67996:9b968361f205 Date: 2013-11-13 00:41 +0200 http://bitbucket.org/pypy/pypy/changeset/9b968361f205/ Log: add dlls for Tkinter diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,10 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -157,6 +157,21 @@ ms\do_ms.bat nmake -f ms\nt.mak install +TkInter module support +~~~~~~~~~~~~~~~~~~~~~~ + +Download tcl from http://www.tcl.tk/software/tcltk/download.html +Extract tcl85. cd into the ``win`` directory and compile:: + + nmake -nologo -f makefile.vc release OPTS=symbols,threads + +Download tk from http://www.tcl.tk/software/tcltk/download.html +Extract tk85. 
cd into the ``win`` directory and compile:: + + set TCLDIR= + nmake -nologo -f makefile.vc release OPTS=symbols,threads + + Using the mingw compiler ------------------------ diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,18 +67,22 @@ raise PyPyCNotFound( 'Bogus path: %r does not exist (see docstring for more info)' % (os.path.dirname(str(pypy_c)),)) + win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', + 'libeay32.dll', 'ssleay32.dll'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - if not withouttk: - try: - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) - except subprocess.CalledProcessError: - print >>sys.stderr, """Building Tk bindings failed. + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. You can either install Tk development headers package or add --without-tk option to skip packaging binary CFFI extension.""" - sys.exit(1) + sys.exit(1) + #Can the dependencies be found from cffi somehow? 
+ win_extras += ['tk85t.dll', 'tk85.dll', 'tcl85t.dll', 'tcl85.dll'] if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -101,9 +105,7 @@ # Can't rename a DLL: it is always called 'libpypy-c.dll' - for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll']: + for extra in win_extras: p = pypy_c.dirpath().join(extra) if not p.check(): p = py.path.local.sysfind(extra) From noreply at buildbot.pypy.org Wed Nov 13 00:07:37 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 13 Nov 2013 00:07:37 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: py3k update #12 Message-ID: <20131112230737.20B981C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: extradoc Changeset: r5109:ea4e00abbf96 Date: 2013-11-12 15:06 -0800 http://bitbucket.org/pypy/extradoc/changeset/ea4e00abbf96/ Log: py3k update #12 diff --git a/blog/draft/py3k-status-update-12.rst b/blog/draft/py3k-status-update-12.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3k-status-update-12.rst @@ -0,0 +1,45 @@ +Py3k status update #12 +---------------------- + +This is the 12th status update about our work on the `py3k branch`_, which we +can work on thanks to all of the people who donated_ to the `py3k proposal`_. + +Here's an update on the recent progress: + +* Thank you to everyone who has provided initial feedback on the PyPy3 2.1 beta + 1 release. We've gotten a number of bug reports, most of which have been + fixed. + +* As usual, we're continually keeping up with changes from the default + branch. Oftentimes these merges come at a cost (conflicts and or + reintegration of py3k changes) but occasionally we get goodies for free, such + as the `recent JIT optimizations`_ and `incremental garbage collection`_. 
+ +* We've been focusing on re-optimizing Python 2 int sized (machine sized) + integers: + +We have a couple of known, notable speed regressions in the PyPy3 beta release +vs regular PyPy. The major one being with Python 2.x int sized (or machine +sized) integers. + +Python 3 drops the distinction between int and long types. CPython 3.x +accomplishes this by removing the old int type entirely and renaming the long +type to int. Initially, we've done the same for PyPy3 for the sake of +simplicity and getting everything working. + +However PyPy's JIT is capable of heavily optimizing these machine sized integer +operations, so this came with a regression in performance in this area. + +We're now in the process of solving this. Part of this work also involves some +house cleaning on these numeric types which will also benefit the default +branch. + +cheers, +Phil + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/commits/all/tip/branch%28%22py3k%22%29 + +.. _`recent JIT optimizations`: http://morepypy.blogspot.com/2013/10/making-coveragepy-faster-under-pypy.html +.. 
_`incremental garbage collection`: http://morepypy.blogspot.com/2013/10/incremental-garbage-collector-in-pypy.html From noreply at buildbot.pypy.org Wed Nov 13 01:12:37 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 13 Nov 2013 01:12:37 +0100 (CET) Subject: [pypy-commit] pypy default: document sqlite3.dll version for windows Message-ID: <20131113001237.988221C00F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r67997:df0777fb0d9f Date: 2013-11-13 02:10 +0200 http://bitbucket.org/pypy/pypy/changeset/df0777fb0d9f/ Log: document sqlite3.dll version for windows diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -118,14 +118,10 @@ The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract -it into a directory under the base directory. Also get -http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll -into the bin directory, and the sqlite3.def into the sources directory. -Now build the import library so cffi can use the header and dll:: +PyPy uses cffi to interact with sqlite3.dll. Only the dll is needed, the cffi +wrapper is compiled when the module is imported for the first time. +The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. 
- lib /DEF:sqlite3.def" /OUT:sqlite3.lib" - copy sqlite3.lib path\to\libs The expat XML parser From noreply at buildbot.pypy.org Wed Nov 13 01:56:02 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 13 Nov 2013 01:56:02 +0100 (CET) Subject: [pypy-commit] pypy default: clean up some formatting Message-ID: <20131113005602.EB8FC1C0F88@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67998:bda39936972c Date: 2013-11-12 19:30 -0500 http://bitbucket.org/pypy/pypy/changeset/bda39936972c/ Log: clean up some formatting diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -98,11 +98,11 @@ def getitem_filter(self, space, arr): if len(arr.get_shape()) > 1 and arr.get_shape() != self.get_shape(): - raise OperationError(space.w_ValueError, - space.wrap("boolean index array should have 1 dimension")) + raise OperationError(space.w_ValueError, space.wrap( + "boolean index array should have 1 dimension")) if arr.get_size() > self.get_size(): - raise OperationError(space.w_ValueError, - space.wrap("index out of range for array")) + raise OperationError(space.w_ValueError, space.wrap( + "index out of range for array")) size = loop.count_all_true(arr) if len(arr.get_shape()) == 1: res_shape = [size] + self.get_shape()[1:] @@ -113,17 +113,18 @@ def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): - raise OperationError(space.w_ValueError, - space.wrap("boolean index array should have 1 dimension")) + raise OperationError(space.w_ValueError, space.wrap( + "boolean index array should have 1 dimension")) if idx.get_size() > self.get_size(): - raise OperationError(space.w_ValueError, - space.wrap("index out of range for array")) + raise OperationError(space.w_ValueError, space.wrap( + "index out of range for array")) size = loop.count_all_true(idx) if size 
> val.get_size() and val.get_size() != 1: - raise OperationError(space.w_ValueError, space.wrap("NumPy boolean array indexing assignment " - "cannot assign %d input values to " - "the %d output values where the mask is true" % - (val.get_size(), size))) + raise OperationError(space.w_ValueError, space.wrap( + "NumPy boolean array indexing assignment " + "cannot assign %d input values to " + "the %d output values where the mask is true" % + (val.get_size(), size))) loop.setitem_filter(self, idx, val, size) def _prepare_array_index(self, space, w_index): From noreply at buildbot.pypy.org Wed Nov 13 01:56:04 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 13 Nov 2013 01:56:04 +0100 (CET) Subject: [pypy-commit] pypy default: fix missing coerce in setitem_filter Message-ID: <20131113005604.588011C0F88@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r67999:63c04f0fba9f Date: 2013-11-12 19:42 -0500 http://bitbucket.org/pypy/pypy/changeset/63c04f0fba9f/ Log: fix missing coerce in setitem_filter diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -125,7 +125,7 @@ "cannot assign %d input values to " "the %d output values where the mask is true" % (val.get_size(), size))) - loop.setitem_filter(self, idx, val, size) + loop.setitem_filter(space, self, idx, val, size) def _prepare_array_index(self, space, w_index): if isinstance(w_index, W_NDimArray): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -398,7 +398,7 @@ 'index_dtype'], reds = 'auto') -def setitem_filter(arr, index, value, size): +def setitem_filter(space, arr, index, value, size): arr_iter = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: @@ -414,7 +414,7 @@ arr_dtype=arr_dtype, ) if 
index_iter.getitem_bool(): - arr_iter.setitem(value_iter.getitem()) + arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) value_iter.next() arr_iter.next() index_iter.next() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -511,6 +511,13 @@ for i in xrange(5): assert a[i] == i + def test_setitem_array(self): + import numpy as np + a = np.array((-1., 0, 1))/0. + b = np.array([False, False, True], dtype=bool) + a[b] = 100 + assert a[2] == 100 + def test_setitem_obj_index(self): from numpypy import arange a = arange(10) From noreply at buildbot.pypy.org Wed Nov 13 02:28:37 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 13 Nov 2013 02:28:37 +0100 (CET) Subject: [pypy-commit] pypy default: raise coerce conversion errors at app-level Message-ID: <20131113012837.D07581C0206@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68000:d50d27c01818 Date: 2013-11-12 20:20 -0500 http://bitbucket.org/pypy/pypy/changeset/d50d27c01818/ Log: raise coerce conversion errors at app-level diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -198,8 +198,7 @@ prefix) def descr_getitem(self, space, w_idx): - if (isinstance(w_idx, W_NDimArray) and - w_idx.get_dtype().is_bool_type()): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): return self.getitem_filter(space, w_idx) try: return self.implementation.descr_getitem(space, self, w_idx) @@ -213,9 +212,11 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if (isinstance(w_idx, W_NDimArray) and - w_idx.get_dtype().is_bool_type()): - self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) + if 
isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + try: + self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) + except ValueError, e: + raise OperationError(space.w_ValueError, space.wrap(str(e))) return try: self.implementation.descr_setitem(space, self, w_idx, w_value) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2454,6 +2454,23 @@ assert exc.value[0].find('cannot assign') >= 0 assert (a == [[0, 1], [2, 3], [4, 5]]).all() + def test_nonarray_assignment(self): + import numpypy as np + a = np.arange(10) + b = np.ones(10, dtype=bool) + r = np.arange(10) + def assign(a, b, c): + a[b] = c + raises(ValueError, assign, a, b, np.nan) + #raises(ValueError, assign, a, r, np.nan) # XXX + import sys + if '__pypy__' not in sys.builtin_module_names: + a[b] = np.array(np.nan) + #a[r] = np.array(np.nan) + else: + raises(ValueError, assign, a, b, np.array(np.nan)) + #raises(ValueError, assign, a, r, np.array(np.nan)) + def test_copy_kwarg(self): from numpypy import array x = array([1, 2, 3]) From noreply at buildbot.pypy.org Wed Nov 13 04:38:09 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 13 Nov 2013 04:38:09 +0100 (CET) Subject: [pypy-commit] pypy default: support ndarray.reshape(()) to reshape to scalar Message-ID: <20131113033809.ACFF11C07D2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68001:b89a1dc6a9e1 Date: 2013-11-12 22:32 -0500 http://bitbucket.org/pypy/pypy/changeset/b89a1dc6a9e1/ Log: support ndarray.reshape(()) to reshape to scalar diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -73,6 +73,8 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, 
orig_array) else: + if self.get_size() == 1 and len(new_shape) == 0: + return scalar.Scalar(self.dtype, self.getitem(0)) return None def get_view(self, orig_array, dtype, new_shape): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -710,7 +710,14 @@ def test_reshape(self): from numpypy import array, zeros + for a in [array(1), array([1])]: + for s in [(), (1,)]: + b = a.reshape(s) + assert b.shape == s + assert (b == [1]).all() a = array(range(12)) + exc = raises(ValueError, "b = a.reshape(())") + assert str(exc.value) == "total size of new array must be unchanged" exc = raises(ValueError, "b = a.reshape((3, 10))") assert str(exc.value) == "total size of new array must be unchanged" b = a.reshape((3, 4)) From noreply at buildbot.pypy.org Wed Nov 13 09:16:11 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 13 Nov 2013 09:16:11 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: start light, reflow the paras Message-ID: <20131113081611.A12A11C0134@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: release-2.2.x Changeset: r68002:725bbfa926bd Date: 2013-11-13 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/725bbfa926bd/ Log: start light, reflow the paras diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -45,23 +45,27 @@ Highlights ========== -* Our Garbage Collector is now "incremental". It should avoid almost all pauses due - to a major collection taking place. Previously, it would pause the program (rarely) - to walk all live objects, which could take arbitrarily long if your process is using - a whole lot of RAM. Now the same work is done in steps. This should make PyPy - more responsive, e.g. in games. 
There are still other pauses, from the GC and the JIT, - but they should be on the order of 5 milliseconds each. +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. -* The JIT counters for hot code were never reset, which meant that a process running - for long enough would eventually JIT-compile more and more rarely executed code. - Not only is it useless to compile such code, but as more compiled code means more - memory used, this gives the impression of a memory leak. This has been tentatively +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively fixed by decreasing the counters from time to time. -* NumPy has been split: now PyPy only contains the core module, called ``_numpypy``. - The ``numpy`` module itself has been moved to ``https://bitbucket.org/pypy/numpy``. - You need to install it separately in a virtualenv with - ``pip install git+https://bitbucket.org/pypy/numpy.git``. +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy``. You need to install it + separately in a virtualenv with ``pip install + git+https://bitbucket.org/pypy/numpy.git``. 
* improvements to non-inlined calls @@ -71,17 +75,19 @@ * improvements in buffer copying -* tk is supported (XXX was already in pypy 2.1 it seems?? maybe not correctly packaged?) +* tk is supported (XXX was already in pypy 2.1 it seems?? maybe not + correctly packaged?) -* We finally wrote all the missing ``os.xxx()`` functions. There are a lot of strange - ones that nobody ever heard about, except those who really need them. +* We finally wrote all the missing ``os.xxx()`` functions. There are + a lot of strange ones that nobody ever heard about, except those who + really need them. * numpy C API - the core module is included in PyPy 2.2, but you must now install - an external fork of numpy from https://bitbucket.org/pypy/numpy + the core module is included in PyPy 2.2, but you must now install an + external fork of numpy from https://bitbucket.org/pypy/numpy -removed in favor of an external numpy fork -at https://bitbucket.org/pypy/numpy +removed in favor of an external numpy fork at +https://bitbucket.org/pypy/numpy From noreply at buildbot.pypy.org Wed Nov 13 10:49:38 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 13 Nov 2013 10:49:38 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: work on the announcement Message-ID: <20131113094938.5CD011C13B9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: release-2.2.x Changeset: r68003:b2133aa0bf11 Date: 2013-11-13 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/b2133aa0bf11/ Log: work on the announcement diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -67,27 +67,20 @@ separately in a virtualenv with ``pip install git+https://bitbucket.org/pypy/numpy.git``. 
-* improvements to non-inlined calls +* non-inlined calls have less overhead -* sys.set_trace is now JITted (think coverage) +* Things that use ``sys.set_trace`` are now JITted (like coverage) -* faster json +* JSON encoding is faster -* improvements in buffer copying - -* tk is supported (XXX was already in pypy 2.1 it seems?? maybe not - correctly packaged?) +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) * We finally wrote all the missing ``os.xxx()`` functions. There are a lot of strange ones that nobody ever heard about, except those who really need them. -* numpy C API +* numpy has a rudimentary C API that cooperates with ``cpyext`` - - the core module is included in PyPy 2.2, but you must now install an - external fork of numpy from https://bitbucket.org/pypy/numpy - - -removed in favor of an external numpy fork at -https://bitbucket.org/pypy/numpy +Cheers, +Armin Rigo and Maciej Fijalkowski From noreply at buildbot.pypy.org Wed Nov 13 11:35:54 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 13 Nov 2013 11:35:54 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: merge default Message-ID: <20131113103554.A22621C13E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r68004:c0363615e405 Date: 2013-11-13 11:34 +0100 http://bitbucket.org/pypy/pypy/changeset/c0363615e405/ Log: merge default diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -118,14 +118,10 @@ The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract -it into a directory under the base directory. Also get -http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll -into the bin directory, and the sqlite3.def into the sources directory. 
-Now build the import library so cffi can use the header and dll:: +PyPy uses cffi to interact with sqlite3.dll. Only the dll is needed, the cffi +wrapper is compiled when the module is imported for the first time. +The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. - lib /DEF:sqlite3.def" /OUT:sqlite3.lib" - copy sqlite3.lib path\to\libs The expat XML parser diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -73,6 +73,8 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) else: + if self.get_size() == 1 and len(new_shape) == 0: + return scalar.Scalar(self.dtype, self.getitem(0)) return None def get_view(self, orig_array, dtype, new_shape): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -11,6 +11,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder @@ -113,7 +114,7 @@ class W_GenericBox(W_Root): - _attrs_ = [] + _attrs_ = ['w_flags'] def descr__new__(space, w_subtype, __args__): raise operationerrfmt(space.w_TypeError, @@ -292,6 +293,12 @@ def descr_copy(self, space): return self.convert_to(self.get_dtype(space)) + w_flags = None + def descr_get_flags(self, space): + if self.w_flags is None: + self.w_flags = W_FlagsObject(self) + return self.w_flags + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") @@ -550,6 +557,7 @@ 
strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = GetSetProperty(W_GenericBox.descr_self), + flags = GetSetProperty(W_GenericBox.descr_get_flags), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -401,7 +401,7 @@ raise operationerrfmt(space.w_TypeError, msg, w_dtype) W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpypy", + __module__ = "numpy", __new__ = interp2app(descr__new__), __str__= interp2app(W_Dtype.descr_str), diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/interp_flagsobj.py @@ -0,0 +1,45 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.error import OperationError + + +class W_FlagsObject(W_Root): + def __init__(self, arr): + self.arr = arr + + def descr_get_contiguous(self, space): + return space.w_True + + def descr_get_fortran(self, space): + return space.w_False + + def descr_get_writeable(self, space): + return space.w_True + + def descr_getitem(self, space, w_item): + key = space.str_w(w_item) + if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": + return self.descr_get_contiguous(space) + if key == "F" or key == "FORTRAN" or key == "F_CONTIGUOUS": + return self.descr_get_fortran(space) + if key == "W" or key == "WRITEABLE": + return self.descr_get_writeable(space) + raise OperationError(space.w_KeyError, space.wrap( + "Unknown flag")) + + def descr_setitem(self, space, w_item, w_value): + raise OperationError(space.w_KeyError, space.wrap( + "Unknown flag")) + +W_FlagsObject.typedef = TypeDef("flagsobj", + __module__ = "numpy", + 
__getitem__ = interp2app(W_FlagsObject.descr_getitem), + __setitem__ = interp2app(W_FlagsObject.descr_setitem), + + contiguous = GetSetProperty(W_FlagsObject.descr_get_contiguous), + c_contiguous = GetSetProperty(W_FlagsObject.descr_get_contiguous), + f_contiguous = GetSetProperty(W_FlagsObject.descr_get_fortran), + fortran = GetSetProperty(W_FlagsObject.descr_get_fortran), + writeable = GetSetProperty(W_FlagsObject.descr_get_writeable), +) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -9,6 +9,7 @@ from pypy.module.micronumpy.strides import find_shape_and_elems,\ get_shape_from_iterable, to_coords, shape_agreement, \ shape_agreement_multiple +from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.module.micronumpy.interp_flatiter import W_FlatIterator from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop @@ -97,11 +98,11 @@ def getitem_filter(self, space, arr): if len(arr.get_shape()) > 1 and arr.get_shape() != self.get_shape(): - raise OperationError(space.w_ValueError, - space.wrap("boolean index array should have 1 dimension")) + raise OperationError(space.w_ValueError, space.wrap( + "boolean index array should have 1 dimension")) if arr.get_size() > self.get_size(): - raise OperationError(space.w_ValueError, - space.wrap("index out of range for array")) + raise OperationError(space.w_ValueError, space.wrap( + "index out of range for array")) size = loop.count_all_true(arr) if len(arr.get_shape()) == 1: res_shape = [size] + self.get_shape()[1:] @@ -112,18 +113,19 @@ def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): - raise OperationError(space.w_ValueError, - space.wrap("boolean index array should have 1 dimension")) + raise OperationError(space.w_ValueError, space.wrap( + "boolean 
index array should have 1 dimension")) if idx.get_size() > self.get_size(): - raise OperationError(space.w_ValueError, - space.wrap("index out of range for array")) + raise OperationError(space.w_ValueError, space.wrap( + "index out of range for array")) size = loop.count_all_true(idx) if size > val.get_size() and val.get_size() != 1: - raise OperationError(space.w_ValueError, space.wrap("NumPy boolean array indexing assignment " - "cannot assign %d input values to " - "the %d output values where the mask is true" % - (val.get_size(), size))) - loop.setitem_filter(self, idx, val, size) + raise OperationError(space.w_ValueError, space.wrap( + "NumPy boolean array indexing assignment " + "cannot assign %d input values to " + "the %d output values where the mask is true" % + (val.get_size(), size))) + loop.setitem_filter(space, self, idx, val, size) def _prepare_array_index(self, space, w_index): if isinstance(w_index, W_NDimArray): @@ -196,8 +198,7 @@ prefix) def descr_getitem(self, space, w_idx): - if (isinstance(w_idx, W_NDimArray) and - w_idx.get_dtype().is_bool_type()): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): return self.getitem_filter(space, w_idx) try: return self.implementation.descr_getitem(space, self, w_idx) @@ -211,9 +212,11 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if (isinstance(w_idx, W_NDimArray) and - w_idx.get_dtype().is_bool_type()): - self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + try: + self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) + except ValueError, e: + raise OperationError(space.w_ValueError, space.wrap(str(e))) return try: self.implementation.descr_setitem(space, self, w_idx, w_value) @@ -338,11 +341,11 @@ Returns an array containing the same data with a new shape. - Refer to `numpypy.reshape` for full documentation. 
+ Refer to `numpy.reshape` for full documentation. See Also -------- - numpypy.reshape : equivalent function + numpy.reshape : equivalent function """ args_w, kw_w = __args__.unpack() order = NPY_CORDER @@ -610,13 +613,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "dumps not implemented yet")) + w_flags = None def descr_get_flags(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - "getting flags not implemented yet")) - - def descr_set_flags(self, space, w_args): - raise OperationError(space.w_NotImplementedError, space.wrap( - "setting flags not implemented yet")) + if self.w_flags is None: + self.w_flags = W_FlagsObject(self) + return self.w_flags @unwrap_spec(offset=int) def descr_getfield(self, space, w_dtype, offset): @@ -1123,9 +1124,8 @@ return res """, filename=__file__).interphook('ptp') -W_NDimArray.typedef = TypeDef( - "ndarray", - __module__ = "numpypy", +W_NDimArray.typedef = TypeDef("ndarray", + __module__ = "numpy", __new__ = interp2app(descr_new_array), __len__ = interp2app(W_NDimArray.descr_len), @@ -1204,6 +1204,7 @@ size = GetSetProperty(W_NDimArray.descr_get_size), itemsize = GetSetProperty(W_NDimArray.descr_get_itemsize), nbytes = GetSetProperty(W_NDimArray.descr_get_nbytes), + flags = GetSetProperty(W_NDimArray.descr_get_flags), fill = interp2app(W_NDimArray.descr_fill), tostring = interp2app(W_NDimArray.descr_tostring), @@ -1391,8 +1392,8 @@ return box -W_FlatIterator.typedef = TypeDef( - 'flatiter', +W_FlatIterator.typedef = TypeDef("flatiter", + __module__ = "numpy", __iter__ = interp2app(W_FlatIterator.descr_iter), __getitem__ = interp2app(W_FlatIterator.descr_getitem), __setitem__ = interp2app(W_FlatIterator.descr_setitem), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -413,7 +413,7 @@ W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpypy", + 
__module__ = "numpy", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -398,7 +398,7 @@ 'index_dtype'], reds = 'auto') -def setitem_filter(arr, index, value, size): +def setitem_filter(space, arr, index, value, size): arr_iter = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: @@ -414,7 +414,7 @@ arr_dtype=arr_dtype, ) if index_iter.getitem_bool(): - arr_iter.setitem(value_iter.getitem()) + arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) value_iter.next() arr_iter.next() index_iter.next() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -26,6 +26,7 @@ assert d.kind == 'b' assert dtype(d) is d assert dtype('bool') is d + assert repr(type(d)) == "" assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -0,0 +1,22 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestFlagsObj(BaseNumpyAppTest): + def test_repr(self): + import numpy as np + a = np.array([1,2,3]) + assert repr(type(a.flags)) == "" + + def test_array_flags(self): + import numpy as np + a = np.array([1,2,3]) + assert a.flags.c_contiguous == True + assert a.flags['W'] == True + raises(KeyError, "a.flags['blah']") + raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") + raises((TypeError, AttributeError), "a.flags.c_contiguous = False") + + def test_scalar_flags(self): + import numpy as np + a = np.int32(2) + assert a.flags.c_contiguous == True diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -249,9 +249,11 @@ return CustomIntObject(value) def test_ndarray(self): - from numpypy import ndarray, array, dtype + from numpy import ndarray, array, dtype, flatiter assert type(ndarray) is type + assert repr(ndarray) == "" + assert repr(flatiter) == "" assert type(array) is not type a = ndarray((2, 3)) assert a.shape == (2, 3) @@ -509,6 +511,13 @@ for i in xrange(5): assert a[i] == i + def test_setitem_array(self): + import numpy as np + a = np.array((-1., 0, 1))/0. + b = np.array([False, False, True], dtype=bool) + a[b] = 100 + assert a[2] == 100 + def test_setitem_obj_index(self): from numpypy import arange a = arange(10) @@ -701,7 +710,14 @@ def test_reshape(self): from numpypy import array, zeros + for a in [array(1), array([1])]: + for s in [(), (1,)]: + b = a.reshape(s) + assert b.shape == s + assert (b == [1]).all() a = array(range(12)) + exc = raises(ValueError, "b = a.reshape(())") + assert str(exc.value) == "total size of new array must be unchanged" exc = raises(ValueError, "b = a.reshape((3, 10))") assert str(exc.value) == "total size of new array must be unchanged" b = a.reshape((3, 4)) @@ -2445,6 +2461,23 @@ assert exc.value[0].find('cannot assign') >= 0 assert (a == [[0, 1], [2, 3], [4, 5]]).all() + def test_nonarray_assignment(self): + import numpypy as np + a = np.arange(10) + b = np.ones(10, dtype=bool) + r = np.arange(10) + def assign(a, b, c): + a[b] = c + raises(ValueError, assign, a, b, np.nan) + #raises(ValueError, assign, a, r, np.nan) # XXX + import sys + if '__pypy__' not in sys.builtin_module_names: + a[b] = np.array(np.nan) + #a[r] = np.array(np.nan) + else: + raises(ValueError, assign, a, b, np.array(np.nan)) + #raises(ValueError, assign, a, r, np.array(np.nan)) + def test_copy_kwarg(self): from numpypy import array x = array([1, 2, 3]) 
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -81,7 +81,7 @@ assert isinstance(add, ufunc) assert repr(add) == "" - assert repr(ufunc) == "" or repr(ufunc) == "" + assert repr(ufunc) == "" def test_ufunc_attrs(self): from numpypy import add, multiply, sin diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -2,8 +2,7 @@ class AppTestGrp: - - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('binascii', '_ffi', '_rawffi', 'itertools')) def setup_class(cls): cls.w_grp = import_lib_pypy(cls.space, 'grp', @@ -11,14 +10,18 @@ def test_basic(self): raises(KeyError, self.grp.getgrnam, "dEkLofcG") - try: - g = self.grp.getgrnam("root") - except KeyError: - return # no 'root' group on OS/X? 
- assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] - assert g.gr_name == 'root' - assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + for name in ["root", "wheel"]: + try: + g = self.grp.getgrnam(name) + except KeyError: + continue + assert g.gr_gid == 0 + assert g.gr_mem == ['root'] or g.gr_mem == [] + assert g.gr_name == name + assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + break + else: + raise def test_extra(self): grp = self.grp diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -103,7 +103,9 @@ elif isinstance(arg, BoxInt): return 'i' + str(mv) elif isinstance(arg, self.ts.ConstRef): - return 'ConstPtr(ptr' + str(mv) + ')' + if arg.value: + return 'ConstPtr(ptr' + str(mv) + ')' + return 'ConstPtr(null)' elif isinstance(arg, self.ts.BoxRef): return 'p' + str(mv) elif isinstance(arg, ConstFloat): From noreply at buildbot.pypy.org Wed Nov 13 12:17:38 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 13 Nov 2013 12:17:38 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: Add missing canmallocgcs. Ops that don't do malloc but can contain a GC safe- Message-ID: <20131113111738.862381C025A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68005:e5e15daf8958 Date: 2013-11-13 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/e5e15daf8958/ Log: Add missing canmallocgcs. Ops that don't do malloc but can contain a GC safe- point (e.g. BecomeInevitable) need to have canmallocgc=True... diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,14 @@ +------------------------------------------------------------ + +POSSIBLE BUG: +investigate if another thread can force a jitframe. Thus, +making a transaction break *after* a guard_not_forced +would be wrong, as the force will only be visible after +the break. 
(The GIL doesn't get released inbetween +the GUARD and the next call that is allowed to, so no +problems there).. +Solution, move transaction breaks right before guard_not_forced, maybe + ------------------------------------------------------------ should stm_thread_local_obj always be read & writeable? would diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -97,6 +97,7 @@ self.default(hop) self.pop_roots(hop, livevars) + # sync with lloperation.py gct_stm_become_inevitable = _gct_with_roots_pushed gct_stm_set_transaction_length = _gct_with_roots_pushed gct_stm_stop_all_other_threads = _gct_with_roots_pushed diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -410,6 +410,9 @@ # direct_calls and maybe several casts, but it looks less heavy-weight # to keep them as operations until the genc stage) + # NOTE: use canmallocgc for all operations that can contain a collection. + # that includes all that do 'BecomeInevitable' or otherwise contain + # possible GC safe-points! 
(also sync with stmframework.py) 'stm_initialize': LLOp(), 'stm_finalize': LLOp(), 'stm_barrier': LLOp(sideeffects=False), @@ -429,7 +432,7 @@ 'stm_commit_transaction': LLOp(), 'stm_begin_inevitable_transaction': LLOp(), 'stm_should_break_transaction': LLOp(sideeffects=False), - 'stm_set_transaction_length': LLOp(), + 'stm_set_transaction_length': LLOp(canmallocgc=True), 'stm_change_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), 'stm_perform_transaction':LLOp(canmallocgc=True), @@ -447,7 +450,7 @@ 'stm_abort_info_push': LLOp(), 'stm_abort_info_pop': LLOp(), - 'stm_inspect_abort_info': LLOp(sideeffects=False), + 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), 'stm_get_adr_of_private_rev_num':LLOp(), 'stm_get_adr_of_read_barrier_cache':LLOp(), diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -21,6 +21,7 @@ 'stm_threadlocalref_get', 'stm_threadlocalref_set', 'stm_threadlocalref_count', 'stm_threadlocalref_addr', 'jit_assembler_call', 'gc_writebarrier', + 'shrink_array', ]) ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops()) From noreply at buildbot.pypy.org Wed Nov 13 16:36:41 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Wed, 13 Nov 2013 16:36:41 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Add IntegerListAscending strategy. Message-ID: <20131113153641.8F0DA1C13E5@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r68006:ef5630ce70e3 Date: 2013-11-13 15:26 +0000 http://bitbucket.org/pypy/pypy/changeset/ef5630ce70e3/ Log: Add IntegerListAscending strategy. 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -896,7 +896,7 @@ def switch_to_correct_strategy(self, w_list, w_item): if type(w_item) is W_IntObject: - strategy = self.space.fromcache(IntegerListStrategy) + strategy = self.space.fromcache(IntegerListAscendingStrategy) elif type(w_item) is W_StringObject: strategy = self.space.fromcache(StringListStrategy) elif type(w_item) is W_UnicodeObject: @@ -1010,7 +1010,11 @@ def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) - strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) + start, step, length = self.unerase(w_list.lstorage) + if step > 0: + strategy = w_list.strategy = self.space.fromcache(IntegerListAscendingStrategy) + else: + strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) w_list.lstorage = strategy.erase(items) def wrap(self, intval): @@ -1518,6 +1522,25 @@ def unwrap(self, w_int): return self.space.int_w(w_int) + def init_from_list_w(self, w_list, list_w): + # While unpacking integer elements, also determine whether they're + # pre-sorted. + assert len(list_w) > 0 + asc = True + l = [0] * len(list_w) + lst = l[0] = self.unwrap(list_w[0]) + for i in range(1, len(list_w)): + item_w = list_w[i] + it = self.unwrap(item_w) + if asc and it < lst: + asc = False + l[i] = it + lst = it + w_list.lstorage = self.erase(l) + if asc: + # The list was already sorted into ascending order. 
+ w_list.strategy = self.space.fromcache(IntegerListAscendingStrategy) + erase, unerase = rerased.new_erasing_pair("integer") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1526,7 +1549,8 @@ return type(w_obj) is W_IntObject def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(IntegerListStrategy) + return w_list.strategy is self.space.fromcache(IntegerListStrategy) \ + or w_list.strategy is self.space.fromcache(IntegerListAscendingStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) @@ -1534,6 +1558,8 @@ sorter.sort() if reverse: l.reverse() + else: + w_list.strategy = self.space.fromcache(IntegerListAscendingStrategy) def getitems_int(self, w_list): return self.unerase(w_list.lstorage) @@ -1611,6 +1637,94 @@ self.space, storage, self) return self._base_setslice(w_list, start, step, slicelength, w_other) +class IntegerListAscendingStrategy(IntegerListStrategy): + def sort(self, w_list, reverse): + if reverse: + self.unerase(w_list.lstorage).reverse() + w_list.strategy = self.space.fromcache(IntegerListStrategy) + + def append(self, w_list, w_item): + if type(w_item) is W_IntObject: + l = self.unerase(w_list.lstorage) + length = len(l) + item = self.unwrap(w_item) + if length == 0 or l[length - 1] <= item: + l.append(item) + return + w_list.strategy = self.space.fromcache(IntegerListStrategy) + IntegerListStrategy.append(self, w_list, w_item) + + def insert(self, w_list, index, w_item): + if type(w_item) is W_IntObject: + l = self.unerase(w_list.lstorage) + length = len(l) + item = self.unwrap(w_item) + if length == 0 or \ + ((index == 0 or l[index - 1] <= item) and (index == length or l[index] >= item)): + l.insert(index, item) + return + w_list.strategy = self.space.fromcache(IntegerListStrategy) + IntegerListStrategy.insert(self, w_list, index, w_item) + + def _extend_from_list(self, w_list, w_item): + if type(w_item) is W_ListObject and \ + w_item.strategy is 
self.space.fromcache(IntegerListAscendingStrategy): + self_l = self.unerase(w_list.lstorage) + other_l = self.unerase(w_item.lstorage) + if len(self_l) == 0 or len(other_l) == 0 or self_l[len(self_l) - 1] <= other_l[0]: + self_l.extend(other_l) + return + w_list.strategy = self.space.fromcache(IntegerListStrategy) + IntegerListStrategy._extend_from_list(self,w_list, w_item) + + def setitem(self, w_list, index, w_item): + if type(w_item) is W_IntObject: + item = self.unwrap(w_item) + l = self.unerase(w_list.lstorage) + length = len(l) + assert len(l) > 0 + if (index == 0 or l[index - 1] <= item) \ + and (index == length - 1 or l[index + 1] >= item): + l[index] = item + return + w_list.strategy = self.space.fromcache(IntegerListStrategy) + IntegerListStrategy.setitem(self, w_list, index, w_item) + + def setslice(self, w_list, start, step, slicelength, w_other): + # XXX could be supported if really desired + w_list.strategy = self.space.fromcache(IntegerListStrategy) + IntegerListStrategy.setslice(self, w_list, start, step, slicelength, w_other) + + def inplace_mul(self, w_list, times): + l = self.unerase(w_list.lstorage) + length = len(l) + if length == 0: + return + if l[0] != l[length - 1]: + w_list.strategy = self.space.fromcache(IntegerListStrategy) + IntegerListStrategy.inplace_mul(self, w_list, times) + + def reverse(self, w_list): + self.unerase(w_list.lstorage).reverse() + w_list.strategy = self.space.fromcache(IntegerListStrategy) + + def _safe_find(self, w_list, obj, start, stop): + if w_list.length() < 16: + return IntegerListStrategy._safe_find(self, w_list, obj, start, stop) + l = self.unerase(w_list.lstorage) + start -= 1 + stop += 1 + while stop - start > 1: + p = (start + stop) / 2 + if l[p] < obj: + start = p + else: + stop = p + if stop == len(l) or l[stop] != obj: + raise ValueError + return stop + + class FloatListStrategy(ListStrategy): import_from_mixin(AbstractUnwrappedStrategy) @@ -1640,7 +1754,7 @@ elif w_objt is W_IntObject or w_objt is 
W_LongObject: return self._safe_find(w_list, w_obj.float_w(self.space), start, stop) elif w_objt is W_StringObject or w_objt is W_UnicodeObject \ - or self.space.type(w_obj).compares_by_identity(): + or self.space.type(w_obj).compares_by_identity(): raise ValueError return ListStrategy.find(self, w_list, w_obj, start, stop) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -526,6 +526,12 @@ assert not l.__contains__(-20) assert not l.__contains__(-21) + l = list(range(1000)) + assert l.index(123) == 123 + del l[123] + raises(ValueError, "l.index(123)") + assert l.index(124) == 123 + def test_call_list(self): assert list('') == [] assert list('abc') == ['a', 'b', 'c'] @@ -577,6 +583,10 @@ assert m == [5,2,3] assert l == [1,2,3] + l = [1,2,3] + l.extend([3,4]) + assert l == [1, 2, 3, 3, 4] + def test_extend_tuple(self): l = l0 = [1] l.extend((2,)) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,8 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, \ + ObjectListStrategy, IntegerListStrategy, IntegerListAscendingStrategy, \ + FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, \ + UnicodeListStrategy from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -342,7 +345,6 @@ r = make_range_list(space, 1,3,7) empty.extend(r) assert isinstance(empty.strategy, RangeListStrategy) - print empty.getitem(6) assert 
space.is_true(space.eq(empty.getitem(1), w(4))) empty = W_ListObject(space, []) @@ -480,7 +482,8 @@ l1 = make_range_list(self.space, 0, 1, 100) l2 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) l3 = self.space.add(l2, l1) - assert l3.strategy is l2.strategy + assert isinstance(l2.strategy, IntegerListAscendingStrategy) + assert isinstance(l3.strategy, IntegerListStrategy) def test_mul(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) @@ -659,6 +662,80 @@ list_copy[0] = 42 assert list_orig == [1, 2, 3] + def test_integerascending(self): + space = self.space + w_l = W_ListObject(space, [space.wrap(1), space.wrap(3)]) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.append(space.wrap(5)) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + + w_l.insert(0, space.wrap(0)) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.insert(4, space.wrap(6)) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + assert space.listview_int(w_l) == [0, 1, 3, 5 ,6] + + w_l = W_ListObject(space, []) + w_l.insert(0, space.wrap(1)) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + + w_l = W_ListObject(space, [space.wrap(3), space.wrap(2), space.wrap(4), space.wrap(1)]) + assert isinstance(w_l.strategy, IntegerListStrategy) + l2 = [1, 2, 3, 4] + space.call_method(w_l, "sort") + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + assert space.listview_int(w_l) == l2 + space.call_method(w_l, "sort") + assert space.listview_int(w_l) == l2 + w_l.append(space.wrap(5)) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.append(space.wrap(0)) + assert isinstance(w_l.strategy, IntegerListStrategy) + + w_l = W_ListObject(space, []) + space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(1), space.wrap(2)])) + assert space.listview_int(w_l) == [1, 2] + assert isinstance(w_l.strategy, 
IntegerListAscendingStrategy) + + space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(4)])) + assert space.listview_int(w_l) == [1, 2, 4] + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + + space.call_method(w_l, "pop") + space.call_method(w_l, "pop") + space.call_method(w_l, "pop") + assert space.listview_int(w_l) == [] + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(4)])) + assert space.listview_int(w_l) == [4] + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + + space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(0)])) + assert space.listview_int(w_l) == [4, 0] + assert isinstance(w_l.strategy, IntegerListStrategy) + + w_l = W_ListObject(space, [space.wrap(1), space.wrap(3), space.wrap(5)]) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.setitem(0, space.wrap(0)) + w_l.setitem(1, space.wrap(4)) + w_l.setitem(2, space.wrap(6)) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.setitem(1, space.wrap(7)) + assert isinstance(w_l.strategy, IntegerListStrategy) + + w_l = W_ListObject(space, [space.wrap(1), space.wrap(1)]) + w_l.inplace_mul(2) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.append(space.wrap(2)) + w_l.inplace_mul(2) + assert isinstance(w_l.strategy, IntegerListStrategy) + + w_l = W_ListObject(space, [space.wrap(1), space.wrap(2)]) + assert isinstance(w_l.strategy, IntegerListAscendingStrategy) + w_l.sort(True) + assert isinstance(w_l.strategy, IntegerListStrategy) + assert space.listview_int(w_l) == [2, 1] + class TestW_ListStrategiesDisabled: spaceconfig = {"objspace.std.withliststrategies": False} From noreply at buildbot.pypy.org Thu Nov 14 02:04:59 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 02:04:59 +0100 (CET) Subject: [pypy-commit] pypy default: allow (unsupported) out arg for scalar.round Message-ID: 
<20131114010459.7F7AB1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68007:bb54a2f34c40 Date: 2013-11-13 19:33 -0500 http://bitbucket.org/pypy/pypy/changeset/bb54a2f34c40/ Log: allow (unsupported) out arg for scalar.round diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -255,7 +255,10 @@ return convert_to_array(space, w_values) @unwrap_spec(decimals=int) - def descr_round(self, space, decimals=0): + def descr_round(self, space, decimals=0, w_out=None): + if not space.is_none(w_out): + raise OperationError(space.w_NotImplementedError, space.wrap( + "out not supported")) v = self.convert_to(self.get_dtype(space)) return self.get_dtype(space).itemtype.round(v, decimals) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -53,6 +53,7 @@ assert f.round() == 13. assert f.round(decimals=-1) == 10. 
assert f.round(decimals=1) == 13.4 + assert f.round(decimals=1, out=None) == 13.4 assert b.round() == 1.0 assert b.round(decimals=5) is b From noreply at buildbot.pypy.org Thu Nov 14 02:05:01 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 02:05:01 +0100 (CET) Subject: [pypy-commit] pypy default: backout d50d27c01818, doesn't work as intended when translated Message-ID: <20131114010501.037361C025A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68008:277365bc6cda Date: 2013-11-13 20:03 -0500 http://bitbucket.org/pypy/pypy/changeset/277365bc6cda/ Log: backout d50d27c01818, doesn't work as intended when translated diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -213,10 +213,7 @@ def descr_setitem(self, space, w_idx, w_value): if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): - try: - self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) - except ValueError, e: - raise OperationError(space.w_ValueError, space.wrap(str(e))) + self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: self.implementation.descr_setitem(space, self, w_idx, w_value) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2461,23 +2461,6 @@ assert exc.value[0].find('cannot assign') >= 0 assert (a == [[0, 1], [2, 3], [4, 5]]).all() - def test_nonarray_assignment(self): - import numpypy as np - a = np.arange(10) - b = np.ones(10, dtype=bool) - r = np.arange(10) - def assign(a, b, c): - a[b] = c - raises(ValueError, assign, a, b, np.nan) - #raises(ValueError, assign, a, r, np.nan) # XXX - import sys - if '__pypy__' not in sys.builtin_module_names: - a[b] = np.array(np.nan) - #a[r] = 
np.array(np.nan) - else: - raises(ValueError, assign, a, b, np.array(np.nan)) - #raises(ValueError, assign, a, r, np.array(np.nan)) - def test_copy_kwarg(self): from numpypy import array x = array([1, 2, 3]) From noreply at buildbot.pypy.org Thu Nov 14 02:29:20 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 02:29:20 +0100 (CET) Subject: [pypy-commit] pypy default: support eq/ne for array.flags Message-ID: <20131114012920.957251C13E5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68009:b9b04c08e3d3 Date: 2013-11-13 20:28 -0500 http://bitbucket.org/pypy/pypy/changeset/b9b04c08e3d3/ Log: support eq/ne for array.flags diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/interp_flagsobj.py --- a/pypy/module/micronumpy/interp_flagsobj.py +++ b/pypy/module/micronumpy/interp_flagsobj.py @@ -7,6 +7,7 @@ class W_FlagsObject(W_Root): def __init__(self, arr): self.arr = arr + self.flags = 0 def descr_get_contiguous(self, space): return space.w_True @@ -32,10 +33,23 @@ raise OperationError(space.w_KeyError, space.wrap( "Unknown flag")) + def eq(self, space, w_other): + if not isinstance(w_other, W_FlagsObject): + return False + return self.flags == w_other.flags + + def descr_eq(self, space, w_other): + return space.wrap(self.eq(space, w_other)) + + def descr_ne(self, space, w_other): + return space.wrap(not self.eq(space, w_other)) + W_FlagsObject.typedef = TypeDef("flagsobj", __module__ = "numpy", __getitem__ = interp2app(W_FlagsObject.descr_getitem), __setitem__ = interp2app(W_FlagsObject.descr_setitem), + __eq__ = interp2app(W_FlagsObject.descr_eq), + __ne__ = interp2app(W_FlagsObject.descr_ne), contiguous = GetSetProperty(W_FlagsObject.descr_get_contiguous), c_contiguous = GetSetProperty(W_FlagsObject.descr_get_contiguous), diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ 
b/pypy/module/micronumpy/test/test_flagsobj.py @@ -20,3 +20,10 @@ import numpy as np a = np.int32(2) assert a.flags.c_contiguous == True + + def test_compare(self): + import numpy as np + a = np.array([1,2,3]) + b = np.array([4,5,6,7]) + assert a.flags == b.flags + assert not a.flags != b.flags From noreply at buildbot.pypy.org Thu Nov 14 03:36:05 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 03:36:05 +0100 (CET) Subject: [pypy-commit] pypy default: add size attribute for scalars Message-ID: <20131114023605.C98581C036B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68010:e14d70573095 Date: 2013-11-13 20:52 -0500 http://bitbucket.org/pypy/pypy/changeset/e14d70573095/ Log: add size attribute for scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -284,6 +284,9 @@ def descr_get_dtype(self, space): return self.get_dtype(space) + def descr_get_size(self, space): + return space.wrap(1) + def descr_get_itemsize(self, space): return self.get_dtype(space).descr_get_itemsize(space) @@ -554,6 +557,7 @@ copy = interp2app(W_GenericBox.descr_copy), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), + size = GetSetProperty(W_GenericBox.descr_get_size), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), nbytes = GetSetProperty(W_GenericBox.descr_get_itemsize), shape = GetSetProperty(W_GenericBox.descr_get_shape), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -85,6 +85,7 @@ import numpypy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') + assert value.size == 1 assert value.itemsize == 8 assert value.nbytes == 8 assert value.shape == () From noreply at buildbot.pypy.org Thu Nov 14 03:36:07 
2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 03:36:07 +0100 (CET) Subject: [pypy-commit] pypy default: support axis argument for array.squeeze Message-ID: <20131114023607.19BDE1C036B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68011:f85795564612 Date: 2013-11-13 21:33 -0500 http://bitbucket.org/pypy/pypy/changeset/f85795564612/ Log: support axis argument for array.squeeze diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -40,3 +40,24 @@ else: raise OperationError(space.w_TypeError, space.wrap( "order not understood")) + +def multi_axis_converter(space, w_axis, ndim): + if space.is_none(w_axis): + return [True] * ndim + out = [False] * ndim + if not space.isinstance_w(w_axis, space.w_tuple): + w_axis = space.newtuple([w_axis]) + for w_item in space.fixedview(w_axis): + item = space.int_w(w_item) + axis = item + if axis < 0: + axis += ndim + if axis < 0 or axis >= ndim: + raise OperationError(space.w_ValueError, space.wrap( + "'axis' entry %d is out of bounds [-%d, %d)" % + (item, ndim, ndim))) + if out[axis]: + raise OperationError(space.w_ValueError, space.wrap( + "duplicate value in 'axis'")) + out[axis] = True + return out diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -19,7 +19,7 @@ from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation -from pypy.module.micronumpy.conversion_utils import order_converter +from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -692,11 +692,20 @@ return 
self.implementation.sort(space, w_axis, w_order) def descr_squeeze(self, space, w_axis=None): + cur_shape = self.get_shape() if not space.is_none(w_axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - "axis unsupported for squeeze")) - cur_shape = self.get_shape() - new_shape = [s for s in cur_shape if s != 1] + axes = multi_axis_converter(space, w_axis, len(cur_shape)) + new_shape = [] + for i in range(len(cur_shape)): + if axes[i]: + if cur_shape[i] != 1: + raise OperationError(space.w_ValueError, space.wrap( + "cannot select an axis to squeeze out " \ + "which has size greater than one")) + else: + new_shape.append(cur_shape[i]) + else: + new_shape = [s for s in cur_shape if s != 1] if len(cur_shape) == len(new_shape): return self return wrap_impl(space, space.type(self), self, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1746,6 +1746,18 @@ assert (b == a).all() b[1] = -1 assert a[0][1] == -1 + a = np.arange(9).reshape((3, 1, 3, 1)) + b = a.squeeze(1) + assert b.shape == (3, 3, 1) + b = a.squeeze((1,)) + assert b.shape == (3, 3, 1) + b = a.squeeze((1, -1)) + assert b.shape == (3, 3) + exc = raises(ValueError, a.squeeze, 5) + assert exc.value.message == "'axis' entry 5 is out of bounds [-4, 4)" + exc = raises(ValueError, a.squeeze, 0) + assert exc.value.message == "cannot select an axis to squeeze out " \ + "which has size greater than one" def test_swapaxes(self): from numpypy import array From noreply at buildbot.pypy.org Thu Nov 14 03:37:34 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 03:37:34 +0100 (CET) Subject: [pypy-commit] pypy default: test duplicate axes also Message-ID: <20131114023734.7351C1C036B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68012:c780cd5bf67f Date: 2013-11-13 21:36 -0500 
http://bitbucket.org/pypy/pypy/changeset/c780cd5bf67f/ Log: test duplicate axes also diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1758,6 +1758,8 @@ exc = raises(ValueError, a.squeeze, 0) assert exc.value.message == "cannot select an axis to squeeze out " \ "which has size greater than one" + exc = raises(ValueError, a.squeeze, (1, 1)) + assert exc.value.message == "duplicate value in 'axis'" def test_swapaxes(self): from numpypy import array From noreply at buildbot.pypy.org Thu Nov 14 05:38:31 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 14 Nov 2013 05:38:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: reapply frame/cell related py3k changes Message-ID: <20131114043831.9D9A81C13E5@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68014:0e3f24e124ac Date: 2013-11-13 20:36 -0800 http://bitbucket.org/pypy/pypy/changeset/0e3f24e124ac/ Log: reapply frame/cell related py3k changes diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -510,7 +510,7 @@ for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] w_value = self.locals_stack_w[i] - w_name = self.space.wrap(name) + w_name = self.space.wrap(name.decode('utf-8')) if w_value is not None: self.space.setitem(self.w_locals, w_name, w_value) else: @@ -547,7 +547,7 @@ new_fastlocals_w = [None] * numlocals for i in range(min(len(varnames), numlocals)): - w_name = self.space.wrap(varnames[i]) + w_name = self.space.wrap(varnames[i].decode('utf-8')) try: w_value = self.space.getitem(self.w_locals, w_name) except OperationError, e: @@ -576,6 +576,8 @@ """ Initialize cellvars from self.locals_stack_w. 
""" + if self.cells is None: + return args_to_copy = self.pycode._args_as_cellvars for i in range(len(args_to_copy)): argnum = args_to_copy[i] diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -484,15 +484,7 @@ try: w_value = cell.get() except ValueError: - varname = self.getfreevarname(varindex) - if self.iscellvar(varindex): - message = "local variable '%s' referenced before assignment" % varname - w_exc_type = self.space.w_UnboundLocalError - else: - message = ("free variable '%s' referenced before assignment" - " in enclosing scope" % varname) - w_exc_type = self.space.w_NameError - raise OperationError(w_exc_type, self.space.wrap(message)) + self.raise_exc_unbound(varindex) else: self.pushvalue(w_value) @@ -502,6 +494,26 @@ cell = self.cells[varindex] cell.set(w_newvalue) + def DELETE_DEREF(self, varindex, next_instr): + cell = self.cells[varindex] + try: + cell.get() + except ValueError: + self.raise_exc_unbound(varindex) + else: + cell.set(None) + + def raise_exc_unbound(self, varindex): + varname = self.getfreevarname(varindex) + if self.iscellvar(varindex): + message = "local variable '%s' referenced before assignment"%varname + w_exc_type = self.space.w_UnboundLocalError + else: + message = ("free variable '%s' referenced before assignment" + " in enclosing scope"%varname) + w_exc_type = self.space.w_NameError + raise OperationError(w_exc_type, self.space.wrap(message)) + def LOAD_CLOSURE(self, varindex, next_instr): # nested scopes: access the cell object cell = self.cells[varindex] @@ -1200,16 +1212,11 @@ return self._make_function(oparg) @jit.unroll_safe - def MAKE_CLOSURE(self, numdefaults, next_instr): - w_codeobj = self.popvalue() - codeobj = self.space.interp_w(pycode.PyCode, w_codeobj) - w_freevarstuple = self.popvalue() + def MAKE_CLOSURE(self, oparg, next_instr): + w_freevarstuple = self.peekvalue(1) freevars = [self.space.interp_w(Cell, cell) for 
cell in self.space.fixedview(w_freevarstuple)] - defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, - defaultarguments, freevars) - self.pushvalue(self.space.wrap(fn)) + self._make_function(oparg, freevars) def BUILD_SLICE(self, numargs, next_instr): if numargs == 3: From noreply at buildbot.pypy.org Thu Nov 14 05:38:30 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 14 Nov 2013 05:38:30 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131114043830.51EA61C13B9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68013:f9c10dfe0d96 Date: 2013-11-13 19:47 -0800 http://bitbucket.org/pypy/pypy/changeset/f9c10dfe0d96/ Log: merge default diff too long, truncating to 2000 out of 23611 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." 
+ @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,9 +26,11 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest to use virtualenv with the resulting pypy-c as the interpreter, you can diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. 
""" assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. 
""" if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -322,7 +349,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +370,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +402,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +434,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +480,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . 
import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = 
ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . 
import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ 
b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + 
prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 
|| (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." 
is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py 
b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # 
xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = 
self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -413,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -431,10 +451,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +486,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, 
tp_ptr) else: @@ -476,6 +505,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +530,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. 
It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from . import core -from .core import * -from . 
import lib -from .lib import * - -from builtins import bool, int, int, float, complex, object, str, str - -from .core import round, abs, max, min - -__version__ = '1.7.0' - -__all__ = ['__version__'] -__all__ += core.__all__ -__all__ += lib.__all__ - -#import sys -#sys.modules.setdefault('numpy', sys.modules['numpypy']) diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ - - -from . import multiarray -from . import umath -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . import shape_base -from .shape_base import * - -from .fromnumeric import amax as max, amin as min, \ - round_ as round -from .numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -from . import multiarray as mu -from . 
import umath as um -from .numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(range(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: - ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - rcount = _count_reduce_items(arr, axis) - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - arr = asanyarray(a) - - # First compute the mean, saving 'rcount' for reuse later - if dtype is None and arr.dtype.kind in ['b','u','i']: - arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True) - else: - arrmean = 
um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True) - rcount = _count_reduce_items(arr, axis) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide(arrmean, rcount, - out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean / float(rcount) - - # arr - arrmean - x = arr - arrmean - - # (arr - arrmean) ** 2 - if arr.dtype.kind == 'c': - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - - # add.reduce((arr - arrmean) ** 2, axis) - ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof) - if not keepdims and isinstance(rcount, mu.ndarray): - rcount = rcount.squeeze(axis=axis) - rcount -= ddof - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - else: - ret = um.sqrt(ret) - - return ret diff --git a/lib_pypy/numpypy/core/arrayprint.py b/lib_pypy/numpypy/core/arrayprint.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/arrayprint.py +++ /dev/null @@ -1,751 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ -""" -__all__ = ["array2string", "set_printoptions", "get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -from . 
import numerictypes as _nt -from .umath import maximum, minimum, absolute, not_equal, isnan, isinf -#from multiarray import format_longfloat, datetime_as_string, datetime_data -from .fromnumeric import ravel - - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements - -if sys.version_info[0] >= 3: - from functools import reduce - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. - - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. 
- Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - See Also - -------- - get_printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) - >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... 
suppress=False, threshold=1000, formatter=None) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ - _formatter - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter - -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - For a full description of these options, see `set_printoptions`. - - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - from . 
import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' - else: - return 'False' - - -def repr_format(x): - return repr(x) - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - formatdict = {'bool' : _boolFormatter, - 'int' : IntegerFormat(data), - 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : FloatFormat(data, precision, suppress_small), - 'complexfloat' : ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat' : ComplexFormat(data, precision, - suppress_small), - 'datetime' : DatetimeFormat(data), - 'timedelta' : TimedeltaFormat(data), - 'numpystr' : repr_format, - 'str' : str} - - if formatter is not None: - fkeys = [k for k in list(formatter.keys()) if formatter[k] is not None] - if 'all' in fkeys: - for key in list(formatdict.keys()): - formatdict[key] = formatter['all'] - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = formatter['int_kind'] - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = formatter['float_kind'] - if 'complex_kind' in 
fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = formatter['complex_kind'] - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = formatter['str_kind'] - for key in list(formatdict.keys()): - if key in fkeys: - formatdict[key] = formatter[key] - - try: - format_function = a._format - msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \ - "will be removed in 2.1. Use the `formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - # find the right formatting function for the array - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - format_function = formatdict['bool'] - elif issubclass(dtypeobj, _nt.integer): - #if issubclass(dtypeobj, _nt.timedelta64): - # format_function = formatdict['timedelta'] - #else: - format_function = formatdict['int'] - elif issubclass(dtypeobj, _nt.floating): - if hasattr(_nt, 'longfloat') and issubclass(dtypeobj, _nt.longfloat): - format_function = formatdict['longfloat'] - else: - format_function = formatdict['float'] - elif issubclass(dtypeobj, _nt.complexfloating): - if hasattr(_nt, 'clongfloat') and issubclass(dtypeobj, _nt.clongfloat): - format_function = formatdict['longcomplexfloat'] - else: - format_function = formatdict['complexfloat'] - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - format_function = formatdict['numpystr'] - #elif issubclass(dtypeobj, _nt.datetime64): - # format_function = formatdict['datetime'] - else: - format_function = formatdict['str'] - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - return lst - -def _convert_arrays(obj): - from . 
import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError : if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - msg = "The `_format` attribute is deprecated in Numpy " \ - "2.0 and will be removed in 2.1. Use the " \ - "`formatter` kw instead." 
- import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix, formatter=formatter) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in range(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) From noreply at buildbot.pypy.org Thu Nov 14 09:17:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 09:17:03 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Updates Message-ID: <20131114081703.B30D01C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r68015:8eb5b5ac4bba Date: 2013-11-14 09:16 +0100 http://bitbucket.org/pypy/pypy/changeset/8eb5b5ac4bba/ Log: Updates diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- 
a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -63,22 +63,25 @@ * NumPy has been split: now PyPy only contains the core module, called ``_numpypy``. The ``numpy`` module itself has been moved to - ``https://bitbucket.org/pypy/numpy``. You need to install it - separately in a virtualenv with ``pip install - git+https://bitbucket.org/pypy/numpy.git``. + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or by directly doing + ``git clone https://bitbucket.org/pypy/numpy.git``, + ``cd numpy``, ``python setup.py install``. * non-inlined calls have less overhead * Things that use ``sys.set_trace`` are now JITted (like coverage) -* JSON encoding is faster +* JSON encoding used to be very fast, now decoding is as well * various buffer copying methods experience speedups (like list-of-ints to ``int[]`` buffer from cffi) -* We finally wrote all the missing ``os.xxx()`` functions. There are - a lot of strange ones that nobody ever heard about, except those who - really need them. +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. 
* numpy has a rudimentary C API that cooperates with ``cpyext`` From noreply at buildbot.pypy.org Thu Nov 14 09:33:53 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:33:53 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: merge default Message-ID: <20131114083353.336791C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r68016:df3217200923 Date: 2013-11-13 23:54 -0500 http://bitbucket.org/pypy/pypy/changeset/df3217200923/ Log: merge default diff too long, truncating to 2000 out of 5219 lines diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. 
If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) 
self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! 
+ key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -322,7 +349,8 @@ def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +370,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. """ - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +402,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +434,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + 
with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +480,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, 
pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, 
pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' # raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print 
sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . 
import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if 
bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . 
import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. 
This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def 
_generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + 
self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
- ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. 
kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, 
model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = 
module.load_function(BFunc, funcname) value = function() return value @@ -413,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -431,10 +451,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +486,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: @@ -476,6 +505,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # 
int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +530,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. 
It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -19,11 +19,13 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -import types +import types, re from pyrepl import unicodedata_ from pyrepl import commands from pyrepl import input +_r_csi_seq = re.compile(r"\033\[[ -@]*[A-~]") + def _make_unctrl_map(): uc_map = {} for c in map(unichr, range(256)): @@ -309,6 +311,10 @@ excluded from the length calculation. So also a copy of the prompt is returned with these control characters removed. """ + # The logic below also ignores the length of common escape + # sequences if they were not explicitly within \x01...\x02. + # They are CSI (or ANSI) sequences ( ESC [ ... LETTER ) + out_prompt = '' l = len(prompt) pos = 0 @@ -321,9 +327,13 @@ break # Found start and end brackets, subtract from string length l = l - (e-s+1) - out_prompt += prompt[pos:s] + prompt[s+1:e] + keep = prompt[pos:s] + l -= sum(map(len, _r_csi_seq.findall(keep))) + out_prompt += keep + prompt[s+1:e] pos = e+1 - out_prompt += prompt[pos:] + keep = prompt[pos:] + l -= sum(map(len, _r_csi_seq.findall(keep))) + out_prompt += keep return out_prompt, l def bow(self, p=None): diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -17,24 +17,24 @@ David Schneider Holger Krekel Christian Tismer + Matti Picus Hakan Ardo Benjamin Peterson - Matti Picus Philip Jenvey Anders Chrigstrom Brian Kearns + Manuel Jacob Eric van Riet Paap + Wim Lavrijsen Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob + Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus 
Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest @@ -45,8 +45,10 @@ David Edelsohn Anders Hammarquist Jakub Gustak + Romain Guillebert Guido Wesdorp Lawrence Oluyede + Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis @@ -54,18 +56,17 @@ Ludovic Aubry Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone @@ -74,29 +75,33 @@ Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Laurence Tratt + Rami Chowdhury David Malcolm Eugene Oden Henry Mason @@ -105,14 +110,15 @@ David Ripton Dusty Phillips Lukas Renggli + Edd Barrett Guenter Jantzen Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu @@ -128,10 +134,13 @@ Olivier Dormond Jared Grubb Karl Bartel + Tobias Pape Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles @@ -141,7 +150,6 @@ Neil Shepperd Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -153,19 +161,15 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain Christian Tismer Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski Andrew Dalke Sylvain Thenault Nathan Taylor @@ -189,13 +193,15 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -209,7 +215,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński Christian Muirhead James Lan shoma hosaka @@ -219,6 +225,7 @@ Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + w31rd0 Jim Baker Rodrigo Araújo Armin Ronacher @@ -234,12 +241,12 @@ Even Wiik Thomassen jbs soareschen + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -137,7 +137,8 @@ control flow of a function (such as ``while`` and ``try`` constructs) - a value stack where bytecode interpretation pulls object - from and puts results on. + from and puts results on. (``locals_stack_w`` is actually a single + list containing both the local scope and the value stack.) - a reference to the *globals* dictionary, containing module-level name-value bindings @@ -151,10 +152,7 @@ - the class ``PyFrame`` is defined in `pypy/interpreter/pyframe.py`_. -- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcode. - -- nested scope support is added to the ``PyFrame`` class in - `pypy/interpreter/nestedscope.py`_. +- the file `pypy/interpreter/pyopcode.py`_ add support for all Python opcodes. .. _Code: @@ -184,12 +182,6 @@ * ``co_name`` name of the code object (often the function name) * ``co_lnotab`` a helper table to compute the line-numbers corresponding to bytecodes -In PyPy, code objects also have the responsibility of creating their Frame_ objects -via the `'create_frame()`` method. With proper parser and compiler support this would -allow to create custom Frame objects extending the execution of functions -in various ways. 
The several Frame_ classes already utilize this flexibility -in order to implement Generators and Nested Scopes. - .. _Function: Function and Method classes diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -90,9 +90,6 @@ collectors can be written for specialized purposes, or even various experiments can be done for the general purpose. Examples: -* An incremental garbage collector that has specified maximal pause times, - crucial for games - * A garbage collector that compact memory better for mobile devices * A concurrent garbage collector (a lot of work) @@ -158,6 +155,8 @@ Embedding PyPy ---------------------------------------- +Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ + Being able to embed PyPy, say with its own limited C API, would be useful. But here is the most interesting variant, straight from EuroPython live discussion :-) We can have a generic "libpypy.so" that @@ -166,6 +165,8 @@ exported. This would give us a one-size-fits-all generic .so file to be imported by any application that wants to load .so files :-) +.. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html + Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -1,3 +1,5 @@ +# NOTE: run this script with LANG=en_US.UTF-8 + import py import sys from collections import defaultdict @@ -132,7 +134,7 @@ if show_numbers: print '%5d %s' % (n, name) else: - print name + print ' ' + name if __name__ == '__main__': show_numbers = '-n' in sys.argv diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -91,6 +91,7 @@ .. branch: safe-win-mmap .. 
branch: boolean-indexing-cleanup .. branch: cpyext-best_base +.. branch: cpyext-int .. branch: fileops2 .. branch: nobold-backtrace diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -118,14 +118,10 @@ The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract -it into a directory under the base directory. Also get -http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll -into the bin directory, and the sqlite3.def into the sources directory. -Now build the import library so cffi can use the header and dll:: +PyPy uses cffi to interact with sqlite3.dll. Only the dll is needed, the cffi +wrapper is compiled when the module is imported for the first time. +The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. - lib /DEF:sqlite3.def" /OUT:sqlite3.lib" - copy sqlite3.lib path\to\libs The expat XML parser diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -56,7 +56,7 @@ interrupted = [] print('--- start ---') thread.start_new_thread(subthread, ()) - for j in range(10): + for j in range(30): if len(done): break print('.') time.sleep(0.25) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -7,7 +7,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.7")', + '__version__': 'space.wrap("0.8")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -19,9 +19,9 @@ _cdata = lltype.nullptr(rffi.CCHARP.TO) def __init__(self, space, cdata, 
ctype): - from pypy.module._cffi_backend import ctypeprim + from pypy.module._cffi_backend import ctypeobj assert lltype.typeOf(cdata) == rffi.CCHARP - assert isinstance(ctype, ctypeprim.W_CType) + assert isinstance(ctype, ctypeobj.W_CType) self.space = space self._cdata = cdata # don't forget keepalive_until_here! self.ctype = ctype @@ -211,7 +211,21 @@ keepalive_until_here(w_value) return # + # A fast path for [0:N] = "somestring". + from pypy.module._cffi_backend import ctypeprim space = self.space + if (space.isinstance_w(w_value, space.w_str) and + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + value = space.str_w(w_value) + if len(value) != length: + raise operationerrfmt(space.w_ValueError, + "need a string of length %d, got %d", + length, len(value)) + copy_string_to_raw(llstr(value), cdata, 0, length) + return + # w_iter = space.iter(w_value) for i in range(length): try: @@ -245,19 +259,22 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray + from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr # if (ct is not self.ctype or not isinstance(ct, ctypeptr.W_CTypePointer) or - ct.ctitem.size <= 0): + (ct.ctitem.size <= 0 and not ct.is_void_ptr)): raise operationerrfmt(space.w_TypeError, "cannot subtract cdata '%s' and cdata '%s'", self.ctype.name, ct.name) # + itemsize = ct.ctitem.size + if itemsize <= 0: itemsize = 1 diff = (rffi.cast(lltype.Signed, self._cdata) - - rffi.cast(lltype.Signed, w_other._cdata)) // ct.ctitem.size + rffi.cast(lltype.Signed, w_other._cdata)) // itemsize return space.wrap(diff) # return self._add_or_sub(w_other, -1) @@ -441,6 +458,7 @@ __getitem__ = interp2app(W_CData.getitem), __setitem__ = interp2app(W_CData.setitem), __add__ = interp2app(W_CData.add), + __radd__ = 
interp2app(W_CData.add), __sub__ = interp2app(W_CData.sub), __getattr__ = interp2app(W_CData.getattr), __setattr__ = interp2app(W_CData.setattr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -34,19 +34,8 @@ datasize = self.size # if datasize < 0: - if (space.isinstance_w(w_init, space.w_list) or - space.isinstance_w(w_init, space.w_tuple)): - length = space.int_w(space.len(w_init)) - elif space.isinstance_w(w_init, space.w_basestring): - # from a string, we add the null terminator - length = space.int_w(space.len(w_init)) + 1 - else: - length = space.getindex_w(w_init, space.w_OverflowError) - if length < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative array length")) - w_init = space.w_None - # + from pypy.module._cffi_backend import misc + w_init, length = misc.get_new_array_length(space, w_init) try: datasize = ovfcheck(length * self.ctitem.size) except OverflowError: diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -15,15 +15,12 @@ class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'is_struct_ptr', - 'length'] + _attrs_ = ['ctitem', 'can_cast_anything', 'length'] + _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length'] length = -1 def __init__(self, space, size, extra, extra_position, ctitem, could_cast_anything=True): - from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion name, name_position = ctitem.insert_name(extra, extra_position) W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -32,7 +29,6 @@ # - for functions, it is the return type self.ctitem = ctitem self.can_cast_anything = 
could_cast_anything and ctitem.cast_anything - self.is_struct_ptr = isinstance(ctitem, W_CTypeStructOrUnion) def is_char_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar) @@ -195,6 +191,7 @@ W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init): + from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctitem = self.ctitem datasize = ctitem.size @@ -202,10 +199,15 @@ raise operationerrfmt(space.w_TypeError, "cannot instantiate ctype '%s' of unknown size", self.name) - if self.is_struct_ptr: + if isinstance(ctitem, W_CTypeStructOrUnion): # 'newp' on a struct-or-union pointer: in this case, we return # a W_CDataPtrToStruct object which has a strong reference # to a W_CDataNewOwning that really contains the structure. + # + if ctitem.with_var_array and not space.is_w(w_init, space.w_None): + datasize = ctitem.convert_struct_from_object( + lltype.nullptr(rffi.CCHARP.TO), w_init, datasize) + # cdatastruct = cdataobj.W_CDataNewOwning(space, datasize, ctitem) cdata = cdataobj.W_CDataPtrToStructOrUnion(space, cdatastruct._cdata, @@ -236,11 +238,15 @@ def add(self, cdata, i): space = self.space ctitem = self.ctitem + itemsize = ctitem.size if ctitem.size < 0: - raise operationerrfmt(space.w_TypeError, + if self.is_void_ptr: + itemsize = 1 + else: + raise operationerrfmt(space.w_TypeError, "ctype '%s' points to items of unknown size", self.name) - p = rffi.ptradd(cdata, i * self.ctitem.size) + p = rffi.ptradd(cdata, i * itemsize) return cdataobj.W_CData(space, p, self) def cast(self, w_ob): @@ -317,7 +323,8 @@ space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and ctype2.is_struct_ptr)): + (isinstance(ctype2, W_CTypePtrOrArray) and + isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: diff --git 
a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -9,7 +9,8 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, intmask -from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rarithmetic import ovfcheck +from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._cffi_backend import cdataobj, ctypeprim, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -17,12 +18,13 @@ class W_CTypeStructOrUnion(W_CType): _immutable_fields_ = ['alignment?', 'fields_list?', 'fields_dict?', - 'custom_field_pos?'] + 'custom_field_pos?', 'with_var_array?'] # fields added by complete_struct_or_union(): alignment = -1 fields_list = None fields_dict = None custom_field_pos = False + with_var_array = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) @@ -90,12 +92,16 @@ pass def convert_from_object(self, cdata, w_ob): - space = self.space - if self._copy_from_same(cdata, w_ob): - return + if not self._copy_from_same(cdata, w_ob): + self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) + @jit.look_inside_iff( + lambda self, cdata, w_ob, optvarsize: jit.isvirtual(w_ob) + ) + def convert_struct_from_object(self, cdata, w_ob, optvarsize): self._check_only_one_argument_for_union(w_ob) + space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) @@ -104,7 +110,9 @@ "too many initializers for '%s' (got %d)", self.name, len(lst_w)) for i in range(len(lst_w)): - self.fields_list[i].write(cdata, lst_w[i]) + optvarsize = self.fields_list[i].write_v(cdata, lst_w[i], + optvarsize) + return optvarsize elif space.isinstance_w(w_ob, space.w_dict): lst_w = space.fixedview(w_ob) @@ -116,11 +124,16 @@ except KeyError: 
space.raise_key_error(w_key) assert 0 - cf.write(cdata, space.getitem(w_ob, w_key)) + optvarsize = cf.write_v(cdata, space.getitem(w_ob, w_key), + optvarsize) + return optvarsize else: - raise self._convert_error("list or tuple or dict or struct-cdata", - w_ob) + if optvarsize == -1: + msg = "list or tuple or dict or struct-cdata" + else: + msg = "list or tuple or dict" + raise self._convert_error(msg, w_ob) @jit.elidable def _getcfield_const(self, attr): @@ -192,6 +205,37 @@ else: self.ctype.convert_from_object(cdata, w_ob) + def write_v(self, cdata, w_ob, optvarsize): + # a special case for var-sized C99 arrays + from pypy.module._cffi_backend import ctypearray + ct = self.ctype + if isinstance(ct, ctypearray.W_CTypeArray) and ct.length < 0: + space = ct.space + w_ob, varsizelength = misc.get_new_array_length(space, w_ob) + if optvarsize != -1: + # in this mode, the only purpose of this function is to compute + # the real size of the structure from a var-sized C99 array + assert cdata == lltype.nullptr(rffi.CCHARP.TO) + itemsize = ct.ctitem.size + try: + varsize = ovfcheck(itemsize * varsizelength) + size = ovfcheck(self.offset + varsize) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array size would overflow a ssize_t")) + assert size >= 0 + return max(size, optvarsize) + # if 'value' was only an integer, get_new_array_length() returns + # w_ob = space.w_None. Detect if this was the case, + # and if so, stop here, leaving the content uninitialized + # (it should be zero-initialized from somewhere else). 
+ if space.is_w(w_ob, space.w_None): + return optvarsize + # + if optvarsize == -1: + self.write(cdata, w_ob) + return optvarsize + def convert_bitfield_to_object(self, cdata): ctype = self.ctype space = ctype.space diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -278,6 +278,22 @@ # ____________________________________________________________ +def get_new_array_length(space, w_value): + if (space.isinstance_w(w_value, space.w_list) or + space.isinstance_w(w_value, space.w_tuple)): + return (w_value, space.int_w(space.len(w_value))) + elif space.isinstance_w(w_value, space.w_basestring): + # from a string, we add the null terminator + return (w_value, space.int_w(space.len(w_value)) + 1) + else: + explicitlength = space.getindex_w(w_value, space.w_OverflowError) + if explicitlength < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative array length")) + return (space.w_None, explicitlength) + +# ____________________________________________________________ + @specialize.arg(0) def _raw_memcopy_tp(TPP, source, dest): # in its own function: LONGLONG may make the whole function jit-opaque diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -158,8 +158,10 @@ fields_list = [] fields_dict = {} custom_field_pos = False + with_var_array = False - for w_field in fields_w: + for i in range(len(fields_w)): + w_field = fields_w[i] field_w = space.fixedview(w_field) if not (2 <= len(field_w) <= 4): raise OperationError(space.w_TypeError, @@ -176,7 +178,11 @@ "duplicate field name '%s'", fname) # if ftype.size < 0: - raise operationerrfmt(space.w_TypeError, + if (isinstance(ftype, ctypearray.W_CTypeArray) and fbitsize < 0 + and (i == len(fields_w) - 1 or foffset != -1)): + with_var_array = True + else: + raise 
operationerrfmt(space.w_TypeError, "field '%s.%s' has ctype '%s' of unknown size", w_ctype.name, fname, ftype.name) # @@ -235,7 +241,8 @@ fields_list.append(fld) fields_dict[fname] = fld - boffset += ftype.size * 8 + if ftype.size >= 0: + boffset += ftype.size * 8 From noreply at buildbot.pypy.org Thu Nov 14 09:33:54 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:33:54 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: move size from types to dtype Message-ID: <20131114083354.752D31C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r68017:8c6fe040d6c7 Date: 2013-11-14 01:28 -0500 http://bitbucket.org/pypy/pypy/changeset/8c6fe040d6c7/ Log: move size from types to dtype diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -52,7 +52,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.itemtype.get_element_size() + return self.size // self.dtype.get_size() def get_storage_size(self): return self.size @@ -399,7 +399,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.itemtype.get_element_size() + self.size = support.product(shape) * self.dtype.get_size() self.start = start self.orig_arr = orig_arr diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -38,7 +38,7 @@ _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "float_type"] def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE, - alternate_constructors=[], aliases=[], float_type=None, + size=1, alternate_constructors=[], aliases=[], float_type=None, 
fields=None, fieldnames=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num @@ -47,6 +47,7 @@ self.char = char self.w_box_type = w_box_type self.byteorder = byteorder + self.size = size self.alternate_constructors = alternate_constructors self.aliases = aliases self.float_type = float_type @@ -123,7 +124,7 @@ return self.byteorder in (NPY_NATIVE, NPY_NATBYTE) def get_size(self): - return self.itemtype.get_element_size() + return self.size * self.itemtype.get_element_size() def get_name(self): if self.char == 'S': @@ -137,7 +138,7 @@ return space.wrap("dtype('%s')" % self.get_name()) def descr_get_itemsize(self, space): - return space.wrap(self.itemtype.get_element_size()) + return space.wrap(self.get_size()) def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -209,10 +210,11 @@ self.fields[space.str_w(key)] = offset, dtype ofs_and_items.append((offset, dtype.itemtype)) - size += dtype.itemtype.get_element_size() + size += dtype.get_size() - self.itemtype = types.RecordType(ofs_and_items, size) - self.name = "void" + str(8 * self.itemtype.get_element_size()) + self.itemtype = types.RecordType(ofs_and_items) + self.size = size + self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): if self.fieldnames is None: @@ -264,7 +266,7 @@ w_class = space.type(self) kind = self.kind - elemsize = self.itemtype.get_element_size() + elemsize = self.get_size() builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) version = space.wrap(3) @@ -319,7 +321,8 @@ elif newendian != NPY_IGNORE: endian = newendian itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) - return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, self.w_box_type, endian) + return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, + self.w_box_type, endian, size=self.size) def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) @@ -343,12 
+346,13 @@ assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) ofs_and_items.append((offset, subdtype.itemtype)) - offset += subdtype.itemtype.get_element_size() * size + offset += subdtype.get_size() * size fieldnames.append(fldname) - itemtype = types.RecordType(ofs_and_items, offset) - return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, "void" + str(8 * itemtype.get_element_size()), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, - fieldnames=fieldnames) + itemtype = types.RecordType(ofs_and_items) + return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * offset * itemtype.get_element_size()), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + fields=fields, fieldnames=fieldnames, size=offset) def dtype_from_dict(space, w_dict): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -362,7 +366,8 @@ # w_align and w_copy are necessary for pickling cache = get_dtype_cache(space) - if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or space.len_w(w_shape) > 0): + if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or + space.len_w(w_shape) > 0): subdtype = descr__new__(space, w_subtype, w_dtype, w_align, w_copy) assert isinstance(subdtype, W_Dtype) size = 1 @@ -373,8 +378,11 @@ dim = space.int_w(w_dim) shape.append(dim) size *= dim - return W_Dtype(types.VoidType(subdtype.itemtype.get_element_size() * size), NPY_VOID, NPY_VOIDLTR, "void" + str(8 * subdtype.itemtype.get_element_size() * size), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), shape=shape, subdtype=subdtype) + return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * subdtype.get_size() * size), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, + size=subdtype.get_size() * size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -464,17 +472,17 @@ size = 1 if char == NPY_STRINGLTR: - itemtype = types.StringType(size) + 
itemtype = types.StringType() basename = 'string' num = NPY_STRING w_box_type = space.gettypefor(interp_boxes.W_StringBox) elif char == NPY_VOIDLTR: - itemtype = types.VoidType(size) + itemtype = types.VoidType() basename = 'void' num = NPY_VOID w_box_type = space.gettypefor(interp_boxes.W_VoidBox) elif char == NPY_UNICODELTR: - itemtype = types.UnicodeType(size) + itemtype = types.UnicodeType() basename = 'unicode' num = NPY_UNICODE w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) @@ -482,27 +490,29 @@ assert False return W_Dtype(itemtype, num, char, - basename + str(8 * itemtype.get_element_size()), - char, w_box_type) + basename + str(8 * size * itemtype.get_element_size()), + char, w_box_type, size=size) def new_string_dtype(space, size): - itemtype = types.StringType(size) + itemtype = types.StringType() return W_Dtype( itemtype, + size=size, num=NPY_STRING, kind=NPY_STRINGLTR, - name='string' + str(8 * itemtype.get_element_size()), + name='string' + str(8 * size * itemtype.get_element_size()), char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): - itemtype = types.UnicodeType(size) + itemtype = types.UnicodeType() return W_Dtype( itemtype, + size=size, num=NPY_UNICODE, kind=NPY_UNICODELTR, - name='unicode' + str(8 * itemtype.get_element_size()), + name='unicode' + str(8 * size * itemtype.get_element_size()), char=NPY_UNICODELTR, w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -677,7 +687,8 @@ float_type = self.w_floatlongdtype, ) self.w_stringdtype = W_Dtype( - types.StringType(0), + types.StringType(), + size=0, num=NPY_STRING, kind=NPY_STRINGLTR, name='string', @@ -687,7 +698,8 @@ aliases=["str"], ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(0), + types.UnicodeType(), + size=0, num=NPY_UNICODE, kind=NPY_UNICODELTR, name='unicode', @@ -696,7 +708,8 @@ alternate_constructors=[space.w_unicode], ) self.w_voiddtype = W_Dtype( - types.VoidType(0), + types.VoidType(), + 
size=0, num=NPY_VOID, kind=NPY_VOIDLTR, name='void', @@ -764,7 +777,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.itemtype.get_element_size(), dtype) + (dtype.get_size(), dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -774,7 +787,7 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + can_name = dtype.kind + str(dtype.get_size()) self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype @@ -844,7 +857,7 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itemsize = dtype.itemtype.get_element_size() + itemsize = dtype.get_size() items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itemsize * 8), # in case of changing diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -85,10 +85,10 @@ return space.wrap(len(self.get_shape())) def descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().itemtype.get_element_size()) + return space.wrap(self.get_dtype().get_size()) def descr_get_nbytes(self, space): - return space.wrap(self.get_size() * self.get_dtype().itemtype.get_element_size()) + return space.wrap(self.get_size() * self.get_dtype().get_size()) def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) @@ -1340,7 +1340,7 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None or ( - dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1): + dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: dtype = 
interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) @@ -1349,7 +1349,7 @@ if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.get_size() < 1: # promote S0 -> S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -475,8 +475,7 @@ if dt2.is_record_type(): return dt2 if dt1.is_str_or_unicode(): - if dt2.itemtype.get_element_size() >= \ - dt1.itemtype.get_element_size(): + if dt2.get_size() >= dt1.get_size(): return dt2 return dt1 return dt2 @@ -556,7 +555,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY_STRING: - if current_guess.itemtype.get_size() < space.len_w(w_obj): + if current_guess.get_size() < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -164,7 +164,7 @@ self.array = array self.offset = 0 self.dtype = array.dtype - self.skip = self.dtype.itemtype.get_element_size() + self.skip = self.dtype.get_size() self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1591,17 +1591,8 @@ ComponentBoxType = interp_boxes.W_FloatLongBox class BaseStringType(BaseType): - _immutable_fields = ['size'] - - def __init__(self, size=0): - BaseType.__init__(self) - self.size = size - def get_element_size(self): - return self.size * rffi.sizeof(self.T) - - def get_size(self): - return self.size + 
return rffi.sizeof(self.T) def str_unary_op(func): specialize.argtype(1)(func) @@ -1636,13 +1627,13 @@ def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) - return self._store(arr.storage, i, offset, box) + size = min(arr.dtype.size, box.arr.size - box.ofs) + return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe - def _store(self, storage, i, offset, box): + def _store(self, storage, i, offset, box, size): assert isinstance(box, interp_boxes.W_StringBox) - # XXX simplify to range(box.dtype.get_size()) ? - for k in range(min(self.size, box.arr.size-box.ofs)): + for k in range(size): storage[k + offset + i] = box.arr.storage[k + box.ofs] def read(self, arr, i, offset, dtype=None): @@ -1725,17 +1716,17 @@ else: w_arg = box.descr_str(space) arg = space.str_w(space.str(w_arg)) - arr = VoidBoxStorage(self.size, mydtype) + arr = VoidBoxStorage(mydtype.size, mydtype) i = 0 - for i in range(min(len(arg), self.size)): + for i in range(min(len(arg), mydtype.size)): arr.storage[i] = arg[i] - for j in range(i + 1, self.size): + for j in range(i + 1, mydtype.size): arr.storage[j] = '\x00' - return interp_boxes.W_StringBox(arr, 0, arr.dtype) + return interp_boxes.W_StringBox(arr, 0, arr.dtype) def fill(self, storage, width, box, start, stop, offset): for i in xrange(start, stop, width): - self._store(storage, i, offset, box) + self._store(storage, i, offset, box, width) class UnicodeType(BaseStringType): T = lltype.UniChar @@ -1772,14 +1763,14 @@ ofs += size def coerce(self, space, dtype, w_items): - arr = VoidBoxStorage(self.size, dtype) + arr = VoidBoxStorage(dtype.get_size(), dtype) self._coerce(space, arr, 0, dtype, w_items, dtype.shape) return interp_boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(self.get_element_size()): + for k in range(box.arr.dtype.get_size()): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] def 
readarray(self, arr, i, offset, dtype=None): @@ -1793,15 +1784,14 @@ class RecordType(BaseType): T = lltype.Char - _immutable_fields_ = ['offsets_and_fields', 'size'] + _immutable_fields_ = ['offsets_and_fields'] - def __init__(self, offsets_and_fields, size): + def __init__(self, offsets_and_fields): BaseType.__init__(self) self.offsets_and_fields = offsets_and_fields - self.size = size def get_element_size(self): - return self.size + return rffi.sizeof(self.T) def read(self, arr, i, offset, dtype=None): if dtype is None: @@ -1821,7 +1811,7 @@ raise OperationError(space.w_ValueError, space.wrap( "wrong length")) items_w = space.fixedview(w_item) - arr = VoidBoxStorage(self.size, dtype) + arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): subdtype = dtype.fields[dtype.fieldnames[i]][1] ofs, itemtype = self.offsets_and_fields[i] @@ -1833,7 +1823,7 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(self.get_element_size()): + for k in range(box.arr.dtype.get_size()): arr.storage[k + i] = box.arr.storage[k + box.ofs] @jit.unroll_safe From noreply at buildbot.pypy.org Thu Nov 14 09:33:55 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:33:55 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: remove copy of offsets_and_fields on RecordType Message-ID: <20131114083355.98F8C1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r68018:fd7f87a0d522 Date: 2013-11-14 03:00 -0500 http://bitbucket.org/pypy/pypy/changeset/fd7f87a0d522/ Log: remove copy of offsets_and_fields on RecordType diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -198,7 +198,6 @@ self.fields = None else: self.fields = {} - ofs_and_items = [] size = 0 for key in space.listview(w_fields): value = 
space.getitem(w_fields, key) @@ -209,10 +208,9 @@ offset = space.int_w(space.getitem(value, space.wrap(1))) self.fields[space.str_w(key)] = offset, dtype - ofs_and_items.append((offset, dtype.itemtype)) size += dtype.get_size() - self.itemtype = types.RecordType(ofs_and_items) + self.itemtype = types.RecordType() self.size = size self.name = "void" + str(8 * self.get_size()) @@ -328,7 +326,6 @@ lst_w = space.listview(w_lst) fields = {} offset = 0 - ofs_and_items = [] fieldnames = [] for w_elem in lst_w: size = 1 @@ -345,10 +342,9 @@ raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - ofs_and_items.append((offset, subdtype.itemtype)) offset += subdtype.get_size() * size fieldnames.append(fldname) - itemtype = types.RecordType(ofs_and_items) + itemtype = types.RecordType() return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, "void" + str(8 * offset * itemtype.get_element_size()), NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1784,11 +1784,6 @@ class RecordType(BaseType): T = lltype.Char - _immutable_fields_ = ['offsets_and_fields'] - - def __init__(self, offsets_and_fields): - BaseType.__init__(self) - self.offsets_and_fields = offsets_and_fields def get_element_size(self): return rffi.sizeof(self.T) @@ -1807,14 +1802,14 @@ if not space.issequence_w(w_item): raise OperationError(space.w_TypeError, space.wrap( "expected sequence")) - if len(self.offsets_and_fields) != space.len_w(w_item): + if len(dtype.fields) != space.len_w(w_item): raise OperationError(space.w_ValueError, space.wrap( "wrong length")) items_w = space.fixedview(w_item) arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): - subdtype = dtype.fields[dtype.fieldnames[i]][1] - ofs, itemtype = 
self.offsets_and_fields[i] + ofs, subdtype = dtype.fields[dtype.fieldnames[i]] + itemtype = subdtype.itemtype w_item = items_w[i] w_box = itemtype.coerce(space, subdtype, w_item) itemtype.store(arr, 0, ofs, w_box) @@ -1831,7 +1826,9 @@ assert isinstance(box, interp_boxes.W_VoidBox) pieces = ["("] first = True - for ofs, tp in self.offsets_and_fields: + for name in box.dtype.fieldnames: + ofs, subdtype = box.dtype.fields[name] + tp = subdtype.itemtype if first: first = False else: From noreply at buildbot.pypy.org Thu Nov 14 09:33:56 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:33:56 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: provide ndarray.newbyteorder() Message-ID: <20131114083356.DAAF71C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r68019:303889d4a9d6 Date: 2013-11-14 03:23 -0500 http://bitbucket.org/pypy/pypy/changeset/303889d4a9d6/ Log: provide ndarray.newbyteorder() diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -63,6 +63,7 @@ raise OperationError(space.w_TypeError, space.wrap( "order not understood")) + def multi_axis_converter(space, w_axis, ndim): if space.is_none(w_axis): return [True] * ndim diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -625,10 +625,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "itemset not implemented yet")) - @unwrap_spec(neworder=str) - def descr_newbyteorder(self, space, neworder): - raise OperationError(space.w_NotImplementedError, space.wrap( - "newbyteorder not implemented yet")) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + return self.descr_view(space, + 
self.get_dtype().descr_newbyteorder(space, new_order)) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None)) @@ -1268,6 +1268,7 @@ diagonal = interp2app(W_NDimArray.descr_diagonal), trace = interp2app(W_NDimArray.descr_trace), view = interp2app(W_NDimArray.descr_view), + newbyteorder = interp2app(W_NDimArray.descr_newbyteorder), ctypes = GetSetProperty(W_NDimArray.descr_get_ctypes), # XXX unimplemented __array_interface__ = GetSetProperty(W_NDimArray.descr_array_iface), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2928,6 +2928,15 @@ assert str(a.dtype) == '|S1' assert a == 'x' + def test_newbyteorder(self): + import numpy as np + a = np.array([1, 2], dtype=np.int16) + b = a.newbyteorder() + assert (b == [256, 512]).all() + c = b.byteswap() + assert (c == [1, 2]).all() + assert (a == [1, 2]).all() + def test_pickle(self): from numpypy import dtype, array from cPickle import loads, dumps From noreply at buildbot.pypy.org Thu Nov 14 09:33:58 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:33:58 +0100 (CET) Subject: [pypy-commit] pypy numpy-newbyteorder: update immutable_fields on dtype Message-ID: <20131114083358.16CFC1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r68020:022135968606 Date: 2013-11-14 03:29 -0500 http://bitbucket.org/pypy/pypy/changeset/022135968606/ Log: update immutable_fields on dtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -35,7 +35,9 @@ return out class W_Dtype(W_Root): - _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "float_type"] + _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", + 
"w_box_type", "byteorder", "size?", "float_type", + "fields?", "fieldnames?", "shape", "subdtype", "base"] def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE, size=1, alternate_constructors=[], aliases=[], float_type=None, From noreply at buildbot.pypy.org Thu Nov 14 09:33:59 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:33:59 +0100 (CET) Subject: [pypy-commit] pypy default: merge numpy-newbyteorder Message-ID: <20131114083359.53B6F1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68021:a80ac0e94c7e Date: 2013-11-14 03:30 -0500 http://bitbucket.org/pypy/pypy/changeset/a80ac0e94c7e/ Log: merge numpy-newbyteorder diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -52,7 +52,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.itemtype.get_element_size() + return self.size // self.dtype.get_size() def get_storage_size(self): return self.size @@ -399,7 +399,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.itemtype.get_element_size() + self.size = support.product(shape) * self.dtype.get_size() self.start = start self.orig_arr = orig_arr diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -1,6 +1,27 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.constants import * + +def byteorder_converter(space, new_order): + endian = new_order[0] + if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP): + ch = endian + if ch in ('b', 'B'): + endian = NPY_BIG + elif ch in ('l', 'L'): + endian = NPY_LITTLE + elif ch in 
('n', 'N'): + endian = NPY_NATIVE + elif ch in ('i', 'I'): + endian = NPY_IGNORE + elif ch in ('s', 'S'): + endian = NPY_SWAP + else: + raise OperationError(space.w_ValueError, space.wrap( + "%s is an unrecognized byteorder" % new_order)) + return endian + + def clipmode_converter(space, w_mode): if space.is_none(w_mode): return NPY_RAISE @@ -19,6 +40,7 @@ raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) + def order_converter(space, w_order, default): if space.is_none(w_order): return default @@ -41,6 +63,7 @@ raise OperationError(space.w_TypeError, space.wrap( "order not understood")) + def multi_axis_converter(space, w_axis, ndim): if space.is_none(w_axis): return [True] * ndim diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit +from pypy.module.micronumpy.conversion_utils import byteorder_converter from pypy.module.micronumpy.constants import * @@ -34,10 +35,12 @@ return out class W_Dtype(W_Root): - _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "float_type"] + _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", + "w_box_type", "byteorder", "size?", "float_type", + "fields?", "fieldnames?", "shape", "subdtype", "base"] def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE, - alternate_constructors=[], aliases=[], float_type=None, + size=1, alternate_constructors=[], aliases=[], float_type=None, fields=None, fieldnames=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num @@ -46,6 +49,7 @@ self.char = char self.w_box_type = w_box_type self.byteorder = byteorder + self.size = size self.alternate_constructors = alternate_constructors 
self.aliases = aliases self.float_type = float_type @@ -122,7 +126,7 @@ return self.byteorder in (NPY_NATIVE, NPY_NATBYTE) def get_size(self): - return self.itemtype.get_element_size() + return self.size * self.itemtype.get_element_size() def get_name(self): if self.char == 'S': @@ -136,7 +140,7 @@ return space.wrap("dtype('%s')" % self.get_name()) def descr_get_itemsize(self, space): - return space.wrap(self.itemtype.get_element_size()) + return space.wrap(self.get_size()) def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -196,7 +200,6 @@ self.fields = None else: self.fields = {} - ofs_and_items = [] size = 0 for key in space.listview(w_fields): value = space.getitem(w_fields, key) @@ -207,11 +210,11 @@ offset = space.int_w(space.getitem(value, space.wrap(1))) self.fields[space.str_w(key)] = offset, dtype - ofs_and_items.append((offset, dtype.itemtype)) - size += dtype.itemtype.get_element_size() + size += dtype.get_size() - self.itemtype = types.RecordType(ofs_and_items, size) - self.name = "void" + str(8 * self.itemtype.get_element_size()) + self.itemtype = types.RecordType() + self.size = size + self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): if self.fieldnames is None: @@ -263,7 +266,7 @@ w_class = space.type(self) kind = self.kind - elemsize = self.itemtype.get_element_size() + elemsize = self.get_size() builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) version = space.wrap(3) @@ -308,11 +311,23 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + newendian = byteorder_converter(space, new_order) + endian = self.byteorder + if endian != NPY_IGNORE: + if newendian == NPY_SWAP: + endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE + elif newendian != NPY_IGNORE: + endian = newendian + itemtype = 
self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) + return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, + self.w_box_type, endian, size=self.size) + def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) fields = {} offset = 0 - ofs_and_items = [] fieldnames = [] for w_elem in lst_w: size = 1 @@ -329,13 +344,13 @@ raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - ofs_and_items.append((offset, subdtype.itemtype)) - offset += subdtype.itemtype.get_element_size() * size + offset += subdtype.get_size() * size fieldnames.append(fldname) - itemtype = types.RecordType(ofs_and_items, offset) - return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, "void" + str(8 * itemtype.get_element_size()), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, - fieldnames=fieldnames) + itemtype = types.RecordType() + return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * offset * itemtype.get_element_size()), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + fields=fields, fieldnames=fieldnames, size=offset) def dtype_from_dict(space, w_dict): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -349,7 +364,8 @@ # w_align and w_copy are necessary for pickling cache = get_dtype_cache(space) - if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or space.len_w(w_shape) > 0): + if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or + space.len_w(w_shape) > 0): subdtype = descr__new__(space, w_subtype, w_dtype, w_align, w_copy) assert isinstance(subdtype, W_Dtype) size = 1 @@ -360,8 +376,11 @@ dim = space.int_w(w_dim) shape.append(dim) size *= dim - return W_Dtype(types.VoidType(subdtype.itemtype.get_element_size() * size), NPY_VOID, NPY_VOIDLTR, "void" + str(8 * subdtype.itemtype.get_element_size() * size), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), 
shape=shape, subdtype=subdtype) + return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * subdtype.get_size() * size), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, + size=subdtype.get_size() * size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -413,6 +432,7 @@ __reduce__ = interp2app(W_Dtype.descr_reduce), __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), @@ -450,17 +470,17 @@ size = 1 if char == NPY_STRINGLTR: - itemtype = types.StringType(size) + itemtype = types.StringType() basename = 'string' num = NPY_STRING w_box_type = space.gettypefor(interp_boxes.W_StringBox) elif char == NPY_VOIDLTR: - itemtype = types.VoidType(size) + itemtype = types.VoidType() basename = 'void' num = NPY_VOID w_box_type = space.gettypefor(interp_boxes.W_VoidBox) elif char == NPY_UNICODELTR: - itemtype = types.UnicodeType(size) + itemtype = types.UnicodeType() basename = 'unicode' num = NPY_UNICODE w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) @@ -468,27 +488,29 @@ assert False return W_Dtype(itemtype, num, char, - basename + str(8 * itemtype.get_element_size()), - char, w_box_type) + basename + str(8 * size * itemtype.get_element_size()), + char, w_box_type, size=size) def new_string_dtype(space, size): - itemtype = types.StringType(size) + itemtype = types.StringType() return W_Dtype( itemtype, + size=size, num=NPY_STRING, kind=NPY_STRINGLTR, - name='string' + str(8 * itemtype.get_element_size()), + name='string' + str(8 * size * itemtype.get_element_size()), char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): - itemtype = types.UnicodeType(size) + itemtype = types.UnicodeType() return W_Dtype( itemtype, + size=size, num=NPY_UNICODE, kind=NPY_UNICODELTR, - name='unicode' + 
str(8 * itemtype.get_element_size()), + name='unicode' + str(8 * size * itemtype.get_element_size()), char=NPY_UNICODELTR, w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -663,7 +685,8 @@ float_type = self.w_floatlongdtype, ) self.w_stringdtype = W_Dtype( - types.StringType(0), + types.StringType(), + size=0, num=NPY_STRING, kind=NPY_STRINGLTR, name='string', @@ -673,7 +696,8 @@ aliases=["str"], ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(0), + types.UnicodeType(), + size=0, num=NPY_UNICODE, kind=NPY_UNICODELTR, name='unicode', @@ -682,7 +706,8 @@ alternate_constructors=[space.w_unicode], ) self.w_voiddtype = W_Dtype( - types.VoidType(0), + types.VoidType(), + size=0, num=NPY_VOID, kind=NPY_VOIDLTR, name='void', @@ -750,7 +775,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.itemtype.get_element_size(), dtype) + (dtype.get_size(), dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -760,7 +785,7 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + can_name = dtype.kind + str(dtype.get_size()) self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype @@ -830,7 +855,7 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itemsize = dtype.itemtype.get_element_size() + itemsize = dtype.get_size() items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itemsize * 8), # in case of changing diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -85,10 +85,10 @@ return space.wrap(len(self.get_shape())) def 
descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().itemtype.get_element_size()) + return space.wrap(self.get_dtype().get_size()) def descr_get_nbytes(self, space): - return space.wrap(self.get_size() * self.get_dtype().itemtype.get_element_size()) + return space.wrap(self.get_size() * self.get_dtype().get_size()) def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) @@ -625,10 +625,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "itemset not implemented yet")) - @unwrap_spec(neworder=str) - def descr_newbyteorder(self, space, neworder): - raise OperationError(space.w_NotImplementedError, space.wrap( - "newbyteorder not implemented yet")) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + return self.descr_view(space, + self.get_dtype().descr_newbyteorder(space, new_order)) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None)) @@ -1268,6 +1268,7 @@ diagonal = interp2app(W_NDimArray.descr_diagonal), trace = interp2app(W_NDimArray.descr_trace), view = interp2app(W_NDimArray.descr_view), + newbyteorder = interp2app(W_NDimArray.descr_newbyteorder), ctypes = GetSetProperty(W_NDimArray.descr_get_ctypes), # XXX unimplemented __array_interface__ = GetSetProperty(W_NDimArray.descr_array_iface), @@ -1340,7 +1341,7 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None or ( - dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1): + dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) @@ -1349,7 +1350,7 @@ if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.get_size() < 1: # promote S0 -> S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if 
ndmin > len(shape): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -475,8 +475,7 @@ if dt2.is_record_type(): return dt2 if dt1.is_str_or_unicode(): - if dt2.itemtype.get_element_size() >= \ - dt1.itemtype.get_element_size(): + if dt2.get_size() >= dt1.get_size(): return dt2 return dt1 return dt2 @@ -556,7 +555,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY_STRING: - if current_guess.itemtype.get_size() < space.len_w(w_obj): + if current_guess.get_size() < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -164,7 +164,7 @@ self.array = array self.offset = 0 self.dtype = array.dtype - self.skip = self.dtype.itemtype.get_element_size() + self.skip = self.dtype.get_size() self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -288,6 +288,49 @@ assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype + def test_newbyteorder(self): + import numpypy as np + import sys + sys_is_le = sys.byteorder == 'little' + native_code = sys_is_le and '<' or '>' + swapped_code = sys_is_le and '>' or '<' + native_dt = np.dtype(native_code+'i2') + swapped_dt = np.dtype(swapped_code+'i2') + assert native_dt.newbyteorder('S') == swapped_dt + assert native_dt.newbyteorder() == swapped_dt + assert native_dt == swapped_dt.newbyteorder('S') + assert native_dt == swapped_dt.newbyteorder('=') + assert native_dt == 
swapped_dt.newbyteorder('N') + assert native_dt == native_dt.newbyteorder('|') + assert np.dtype('i2') == native_dt.newbyteorder('>') + assert np.dtype('>i2') == native_dt.newbyteorder('B') + + for t in [np.int_, np.float_]: + dt = np.dtype(t) + dt1 = dt.newbyteorder().newbyteorder() + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + assert dt.byteorder != dt1.byteorder + #assert hash(dt) == hash(dt1) + if dt == dt2: + assert dt.byteorder != dt2.byteorder + #assert hash(dt) == hash(dt2) + else: + assert dt.byteorder != dt3.byteorder + #assert hash(dt) == hash(dt3) + + exc = raises(ValueError, dt.newbyteorder, 'XX') + assert exc.value[0] == 'XX is an unrecognized byteorder' + + for t in [np.int_, np.float_]: + dt1 = np.dtype(t) + dt2 = dt1.newbyteorder() + s1 = np.array(123, dtype=dt1).tostring() + s2 = np.array(123, dtype=dt2).byteswap().tostring() + assert s1 == s2 + class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2928,6 +2928,15 @@ assert str(a.dtype) == '|S1' assert a == 'x' + def test_newbyteorder(self): + import numpy as np + a = np.array([1, 2], dtype=np.int16) + b = a.newbyteorder() + assert (b == [256, 512]).all() + c = b.byteswap() + assert (c == [1, 2]).all() + assert (a == [1, 2]).all() + def test_pickle(self): from numpypy import dtype, array from cPickle import loads, dumps diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1591,17 +1591,8 @@ ComponentBoxType = interp_boxes.W_FloatLongBox class BaseStringType(BaseType): - _immutable_fields = ['size'] - - def __init__(self, size=0): - BaseType.__init__(self) - self.size = size - def get_element_size(self): - return self.size * 
rffi.sizeof(self.T) - - def get_size(self): - return self.size + return rffi.sizeof(self.T) def str_unary_op(func): specialize.argtype(1)(func) @@ -1636,13 +1627,13 @@ def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) - return self._store(arr.storage, i, offset, box) + size = min(arr.dtype.size, box.arr.size - box.ofs) + return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe - def _store(self, storage, i, offset, box): + def _store(self, storage, i, offset, box, size): assert isinstance(box, interp_boxes.W_StringBox) - # XXX simplify to range(box.dtype.get_size()) ? - for k in range(min(self.size, box.arr.size-box.ofs)): + for k in range(size): storage[k + offset + i] = box.arr.storage[k + box.ofs] def read(self, arr, i, offset, dtype=None): @@ -1725,17 +1716,17 @@ else: w_arg = box.descr_str(space) arg = space.str_w(space.str(w_arg)) - arr = VoidBoxStorage(self.size, mydtype) + arr = VoidBoxStorage(mydtype.size, mydtype) i = 0 - for i in range(min(len(arg), self.size)): + for i in range(min(len(arg), mydtype.size)): arr.storage[i] = arg[i] - for j in range(i + 1, self.size): + for j in range(i + 1, mydtype.size): arr.storage[j] = '\x00' - return interp_boxes.W_StringBox(arr, 0, arr.dtype) + return interp_boxes.W_StringBox(arr, 0, arr.dtype) def fill(self, storage, width, box, start, stop, offset): for i in xrange(start, stop, width): - self._store(storage, i, offset, box) + self._store(storage, i, offset, box, width) class UnicodeType(BaseStringType): T = lltype.UniChar @@ -1772,14 +1763,14 @@ ofs += size def coerce(self, space, dtype, w_items): - arr = VoidBoxStorage(self.size, dtype) + arr = VoidBoxStorage(dtype.get_size(), dtype) self._coerce(space, arr, 0, dtype, w_items, dtype.shape) return interp_boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(self.get_element_size()): + for k in 
range(box.arr.dtype.get_size()): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] def readarray(self, arr, i, offset, dtype=None): @@ -1793,15 +1784,9 @@ class RecordType(BaseType): T = lltype.Char - _immutable_fields_ = ['offsets_and_fields', 'size'] - - def __init__(self, offsets_and_fields, size): - BaseType.__init__(self) - self.offsets_and_fields = offsets_and_fields - self.size = size def get_element_size(self): - return self.size + return rffi.sizeof(self.T) def read(self, arr, i, offset, dtype=None): if dtype is None: @@ -1817,14 +1802,14 @@ if not space.issequence_w(w_item): raise OperationError(space.w_TypeError, space.wrap( "expected sequence")) - if len(self.offsets_and_fields) != space.len_w(w_item): + if len(dtype.fields) != space.len_w(w_item): raise OperationError(space.w_ValueError, space.wrap( "wrong length")) items_w = space.fixedview(w_item) - arr = VoidBoxStorage(self.size, dtype) + arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): - subdtype = dtype.fields[dtype.fieldnames[i]][1] - ofs, itemtype = self.offsets_and_fields[i] + ofs, subdtype = dtype.fields[dtype.fieldnames[i]] + itemtype = subdtype.itemtype w_item = items_w[i] w_box = itemtype.coerce(space, subdtype, w_item) itemtype.store(arr, 0, ofs, w_box) @@ -1833,7 +1818,7 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(self.get_element_size()): + for k in range(box.arr.dtype.get_size()): arr.storage[k + i] = box.arr.storage[k + box.ofs] @jit.unroll_safe @@ -1841,7 +1826,9 @@ assert isinstance(box, interp_boxes.W_VoidBox) pieces = ["("] first = True - for ofs, tp in self.offsets_and_fields: + for name in box.dtype.fieldnames: + ofs, subdtype = box.dtype.fields[name] + tp = subdtype.itemtype if first: first = False else: From noreply at buildbot.pypy.org Thu Nov 14 09:34:00 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:34:00 +0100 (CET) Subject: [pypy-commit] 
pypy numpy-newbyteorder: close merged branch Message-ID: <20131114083400.69B0E1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-newbyteorder Changeset: r68022:cf222adc939f Date: 2013-11-14 03:32 -0500 http://bitbucket.org/pypy/pypy/changeset/cf222adc939f/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 09:45:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 09:45:03 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge release-2.2.x Message-ID: <20131114084503.6DC0A1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68023:f997b57170d5 Date: 2013-11-14 09:42 +0100 http://bitbucket.org/pypy/pypy/changeset/f997b57170d5/ Log: hg merge release-2.2.x diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -33,7 +33,7 @@ $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -347,6 +347,9 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model with self._lock: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.1' +version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.1.0' +release = '2.2.0' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -48,6 +48,6 @@ * send announcements to pypy-dev, python-list, python-announce, python-dev ... -* add a tag on jitviewer that corresponds to pypy release -* add a tag on codespeed that corresponds to pypy release +* add a tag on the pypy/jitviewer repo that corresponds to pypy release +* add a tag on the codespeed web site that corresponds to pypy release diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.1.0`_: the latest official release +* `Release 2.2.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.1.0`: http://pypy.org/download.html +.. _`Release 2.2.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.0.rst @@ -0,0 +1,89 @@ +======================================= +PyPy 2.2 - Incrementalism +======================================= + +We're pleased to announce PyPy 2.2, which targets version 2.7.3 of the Python +language. This release main highlight is the introduction of the incremental +garbage collector, sponsored by the `Raspberry Pi Foundation`_. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.2 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. 
We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. 
+ +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or by directly doing + ``git clone https://bitbucket.org/pypy/numpy.git``, + ``cd numpy``, ``python setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON encoding used to be very fast, now decoding is as well + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.2.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.2.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-2.2.rst @@ -1,5 +1,5 @@ ====================== -What's new in PyPy 2.1 +What's new in PyPy 2.2 ====================== .. this is a revision shortly after release-2.1-beta diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,140 +1,7 @@ -====================== -What's new in PyPy 2.1 -====================== +======================= +What's new in PyPy 2.2+ +======================= -.. this is a revision shortly after release-2.1-beta -.. startrev: 4eb52818e7c0 +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 -.. branch: sanitise_bytecode_dispatch -Make PyPy's bytecode dispatcher easy to read, and less reliant on RPython -magic. 
There is no functional change, though the removal of dead code leads -to many fewer tests to execute. - -.. branch: fastjson -Fast json decoder written in RPython, about 3-4x faster than the pure Python -decoder which comes with the stdlib - -.. branch: improve-str2charp -Improve the performance of I/O writing up to 15% by using memcpy instead of -copying char-by-char in str2charp and get_nonmovingbuffer - -.. branch: flowoperators -Simplify rpython/flowspace/ code by using more metaprogramming. Create -SpaceOperator class to gather static information about flow graph operations. - -.. branch: package-tk -Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch -to optionally skip it. - -.. branch: distutils-cppldflags -Copy CPython's implementation of customize_compiler, dont call split on -environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. - -.. branch: precise-instantiate -When an RPython class is instantiated via an indirect call (that is, which -class is being instantiated isn't known precisely) allow the optimizer to have -more precise information about which functions can be called. Needed for Topaz. - -.. branch: ssl_moving_write_buffer - -.. branch: pythoninspect-fix -Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process -to start interactive prompt when the script execution finishes. This adds -new __pypy__.os.real_getenv call that bypasses Python cache and looksup env -in the underlying OS. Translatorshell now works on PyPy. - -.. branch: add-statvfs -Added os.statvfs and os.fstatvfs - -.. branch: statvfs_tests -Added some addition tests for statvfs. - -.. branch: ndarray-subtype -Allow subclassing ndarray, i.e. matrix - -.. branch: ndarray-sort -Implement ndarray in-place sorting (for numeric types, no non-native byte order) - -.. branch: pypy-pyarray -Implement much of numpy's c api in cpyext, allows (slow) access to ndarray -from c - -.. branch: kill-ootype - -.. 
branch: fast-slowpath -Added an abstraction for functions with a fast and slow path in the JIT. This -speeds up list.append() and list.pop(). - -.. branch: curses_fixes - -.. branch: foldable-getarrayitem-indexerror -Constant-fold reading out of constant tuples in PyPy. - -.. branch: mro-reorder-numpypy-str -No longer delegate numpy string_ methods to space.StringObject, in numpy -this works by kind of by accident. Support for merging the refactor-str-types -branch - -.. branch: kill-typesystem -Remove the "type system" abstraction, now that there is only ever one kind of -type system used. - -.. branch: kill-gen-store-back-in -Kills gen_store_back_in_virtualizable - should improve non-inlined calls by -a bit - -.. branch: dotviewer-linewidth -.. branch: reflex-support -.. branch: numpypy-inplace-op -.. branch: rewritten-loop-logging -.. branch: no-release-gil -.. branch: safe-win-mmap -.. branch: boolean-indexing-cleanup -.. branch: cpyext-best_base -.. branch: cpyext-int -.. branch: fileops2 - -.. branch: nobold-backtrace -Work on improving UnionError messages and stack trace displays. - -.. branch: improve-errors-again -More improvements and refactorings of error messages. - -.. branch: improve-errors-again2 -Unbreak tests in rlib. - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace. - -.. branch: file-support-in-rpython -make open() and friends rpython - -.. branch: incremental-gc -Added the new incminimark GC which performs GC in incremental steps - -.. branch: fast_cffi_list_init -fastpath for cffi.new("long[]") - -.. branch: remove-eval-frame -remove a pointless abstraction - -.. branch: jit-settrace -Allow the jit to continue running when sys.settrace() is active, necessary to -make coverage.py fast - -.. branch: remove-numpypy -Remove lib_pypy/numpypy in favor of external numpy fork - -.. 
branch: jit-counter -Tweak the jit counters: decay them at minor collection (actually -only every 32 minor collection is enough). Should avoid the "memory -leaks" observed in long-running processes, actually created by the -jit compiling more and more rarely executed paths. - -.. branch: fix-trace-jit -Fixed the usage of sys.settrace() with the JIT. Also made it so using -sys.settrace() doesn't cause the GIL to be released on every single iteration. - -.. branch: rordereddict -Implement OrderedDict in RPython diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload @@ -43,6 +44,8 @@ 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name } + if sys.platform == 'win32': + interpleveldefs['getwinerror'] = 'cerrno.getwinerror' for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL", "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]: diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py --- a/pypy/module/_cffi_backend/cerrno.py +++ b/pypy/module/_cffi_backend/cerrno.py @@ -39,3 +39,14 @@ def set_errno(space, errno): ec = get_errno_container(space) ec._cffi_saved_errno = errno + +# ____________________________________________________________ + + at unwrap_spec(code=int) +def getwinerror(space, code=-1): + from rpython.rlib.rwin32 import FormatError + if code == -1: + ec = get_errno_container(space) + code = ec._cffi_saved_LastError + message = FormatError(code) + return space.newtuple([space.wrap(code), space.wrap(message)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ 
b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2687,6 +2687,16 @@ # res = GetLastError() assert res == 42 + # + SetLastError(2) + code, message = getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + # + code, message = getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") def test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -27,6 +27,7 @@ 'popen2': 'app_posix.popen2', 'popen3': 'app_posix.popen3', 'popen4': 'app_posix.popen4', + 'startfile': 'app_startfile.startfile', }) if hasattr(os, 'wait'): diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py new file mode 100644 --- /dev/null +++ b/pypy/module/posix/app_startfile.py @@ -0,0 +1,44 @@ +# NOT_RPYTHON + +class CFFIWrapper(object): + def __init__(self): + import cffi + ffi = cffi.FFI() + ffi.cdef(""" + HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT); + HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT); + DWORD GetLastError(void); + """) + self.NULL = ffi.NULL + self.cast = ffi.cast + self.lib = ffi.dlopen("Shell32.dll") + self.SW_SHOWNORMAL = 1 + self.getwinerror = ffi.getwinerror + +_cffi_wrapper = None + + +def startfile(filepath, operation=None): + global _cffi_wrapper + if _cffi_wrapper is None: + _cffi_wrapper = CFFIWrapper() + w = _cffi_wrapper + # + if operation is None: + operation = w.NULL + if isinstance(filepath, str): + if isinstance(operation, unicode): + operation = operation.encode("ascii") + rc = w.lib.ShellExecuteA(w.NULL, operation, filepath, + w.NULL, w.NULL, w.SW_SHOWNORMAL) + elif isinstance(filepath, unicode): + if isinstance(operation, str): + operation = 
operation.decode("ascii") + rc = w.lib.ShellExecuteW(w.NULL, operation, filepath, + w.NULL, w.NULL, w.SW_SHOWNORMAL) + else: + raise TypeError("argument 1 must be str or unicode") + rc = int(w.cast("uintptr_t", rc)) + if rc <= 32: + code, msg = w.getwinerror() + raise WindowsError(code, msg, filepath) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -52,6 +52,7 @@ def setup_class(cls): cls.space = space + cls.w_runappdirect = space.wrap(cls.runappdirect) cls.w_posix = space.appexec([], GET_POSIX) cls.w_path = space.wrap(str(path)) cls.w_path2 = space.wrap(str(path2)) @@ -1108,6 +1109,28 @@ assert False, "urandom() always returns the same string" # Or very unlucky + if hasattr(os, 'startfile'): + def test_startfile(self): + if not self.runappdirect: + skip("should not try to import cffi at app-level") + startfile = self.posix.startfile + for t1 in [str, unicode]: + for t2 in [str, unicode]: + e = raises(WindowsError, startfile, t1("\\"), t2("close")) + assert e.value.args[0] == 1155 + assert e.value.args[1] == ( + "No application is associated with the " + "specified file for this operation") + if len(e.value.args) > 2: + assert e.value.args[2] == t1("\\") + # + e = raises(WindowsError, startfile, "\\foo\\bar\\baz") + assert e.value.args[0] == 2 + assert e.value.args[1] == ( + "The system cannot find the file specified") + if len(e.value.args) > 2: + assert e.value.args[2] == "\\foo\\bar\\baz" + class AppTestEnvironment(object): def setup_class(cls): diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -195,3 +195,20 @@ assert p.a[0] == 200 assert p.a[1] == 300 assert p.a[2] == 400 + + @pytest.mark.skipif("sys.platform != 'win32'") + 
def test_getwinerror(self): + ffi = FFI() + code, message = ffi.getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") + ffi.cdef("void SetLastError(int);") + lib = ffi.dlopen("Kernel32.dll") + lib.SetLastError(2) + code, message = ffi.getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + code, message = ffi.getwinerror(-1) + assert code == 2 + assert message == "The system cannot find the file specified" diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -712,7 +712,7 @@ "#define BAZ ...\n") lib = ffi.verify("#define FOO 42\n" "#define BAR (-44)\n" - "#define BAZ 0xffffffffffffffffLL\n") + "#define BAZ 0xffffffffffffffffULL\n") assert lib.FOO == 42 assert lib.BAR == -44 assert lib.BAZ == 0xffffffffffffffff @@ -1602,6 +1602,8 @@ (maxulong, -1, ''), (-1, 0xffffffff, 'U'), (-1, maxulong, 'UL')]: + if c2c and sys.platform == 'win32': + continue # enums may always be signed with MSVC ffi = FFI() ffi.cdef("enum foo_e { AA=%s };" % c1) e = py.test.raises(VerificationError, ffi.verify, diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -5,7 +5,6 @@ from __future__ import absolute_import import sys, types, inspect, weakref -from collections import OrderedDict from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, @@ -371,7 +370,7 @@ for e in x: listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) - elif tp is dict or tp is r_dict or tp is OrderedDict: + elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: if need_const: key = Constant(x) try: @@ -413,7 +412,7 @@ 
dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is OrderedDict: + if tp is SomeOrderedDict.knowntype: result = SomeOrderedDict(dictdef) else: result = SomeDict(dictdef) diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -2,7 +2,6 @@ Built-in functions. """ import sys -from collections import OrderedDict from rpython.annotator.model import ( SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, @@ -364,7 +363,7 @@ BUILTIN_ANALYZERS[rpython.rlib.objectmodel.instantiate] = robjmodel_instantiate BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_dict] = robjmodel_r_dict BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_ordereddict] = robjmodel_r_ordereddict -BUILTIN_ANALYZERS[OrderedDict] = lambda : SomeOrderedDict(getbookkeeper().getdictdef()) +BUILTIN_ANALYZERS[SomeOrderedDict.knowntype] = lambda : SomeOrderedDict(getbookkeeper().getdictdef()) BUILTIN_ANALYZERS[rpython.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke BUILTIN_ANALYZERS[rpython.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr] = llmemory_cast_ptr_to_adr diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -32,7 +32,6 @@ import inspect import weakref from types import BuiltinFunctionType, MethodType -from collections import OrderedDict import rpython from rpython.tool import descriptor @@ -357,7 +356,11 @@ return '{...%s...}' % (len(const),) class SomeOrderedDict(SomeDict): - knowntype = OrderedDict + try: + from collections import OrderedDict as knowntype + except ImportError: # Python 2.6 + class PseudoOrderedDict(dict): pass + knowntype = PseudoOrderedDict def method_copy(dct): return SomeOrderedDict(dct.dictdef) diff 
--git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -216,7 +216,7 @@ def llimpl_FormatError(code): "Return a message corresponding to the given Windows error code." buf = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') - + buf[0] = lltype.nullptr(rffi.CCHARP.TO) try: msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, @@ -225,17 +225,20 @@ DEFAULT_LANGUAGE, rffi.cast(rffi.CCHARP, buf), 0, None) + buflen = intmask(msglen) - if msglen <= 2: # includes the case msglen < 0 - return fake_FormatError(code) + # remove trailing cr/lf and dots + s_buf = buf[0] + while buflen > 0 and (s_buf[buflen - 1] <= ' ' or + s_buf[buflen - 1] == '.'): + buflen -= 1 - # FormatMessage always appends \r\n. - buflen = intmask(msglen - 2) - assert buflen > 0 - - result = rffi.charpsize2str(buf[0], buflen) + if buflen <= 0: + result = fake_FormatError(code) + else: + result = rffi.charpsize2str(s_buf, buflen) + finally: LocalFree(rffi.cast(rffi.VOIDP, buf[0])) - finally: lltype.free(buf, flavor='raw') return result diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -1,4 +1,3 @@ -from collections import OrderedDict from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant @@ -750,7 +749,7 @@ BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict -BUILTIN_TYPER[OrderedDict] = rtype_ordered_dict +BUILTIN_TYPER[annmodel.SomeOrderedDict.knowntype] = rtype_ordered_dict BUILTIN_TYPER[objectmodel.r_ordereddict] = rtype_ordered_dict # _________________________________________________________________ diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ 
-1,6 +1,9 @@ import py -from collections import OrderedDict +try: + from collections import OrderedDict +except ImportError: # Python 2.6 + py.test.skip("requires collections.OrderedDict") from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask From noreply at buildbot.pypy.org Thu Nov 14 09:45:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 09:45:04 +0100 (CET) Subject: [pypy-commit] pypy default: update Message-ID: <20131114084504.B6BDE1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68024:f606890f99ed Date: 2013-11-14 09:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f606890f99ed/ Log: update diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,4 @@ .. this is a revision shortly after release-2.2.x .. startrev: 4cd1bc8b3111 +.. branch: release-2.2.x From noreply at buildbot.pypy.org Thu Nov 14 09:45:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 09:45:06 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20131114084506.0393D1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68025:315906031082 Date: 2013-11-14 09:44 +0100 http://bitbucket.org/pypy/pypy/changeset/315906031082/ Log: merge heads diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -52,7 +52,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.itemtype.get_element_size() + return self.size // self.dtype.get_size() def get_storage_size(self): return self.size @@ -399,7 +399,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = 
support.product(shape) * self.dtype.itemtype.get_element_size() + self.size = support.product(shape) * self.dtype.get_size() self.start = start self.orig_arr = orig_arr diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -1,6 +1,27 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.constants import * + +def byteorder_converter(space, new_order): + endian = new_order[0] + if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP): + ch = endian + if ch in ('b', 'B'): + endian = NPY_BIG + elif ch in ('l', 'L'): + endian = NPY_LITTLE + elif ch in ('n', 'N'): + endian = NPY_NATIVE + elif ch in ('i', 'I'): + endian = NPY_IGNORE + elif ch in ('s', 'S'): + endian = NPY_SWAP + else: + raise OperationError(space.w_ValueError, space.wrap( + "%s is an unrecognized byteorder" % new_order)) + return endian + + def clipmode_converter(space, w_mode): if space.is_none(w_mode): return NPY_RAISE @@ -19,6 +40,7 @@ raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) + def order_converter(space, w_order, default): if space.is_none(w_order): return default @@ -41,6 +63,7 @@ raise OperationError(space.w_TypeError, space.wrap( "order not understood")) + def multi_axis_converter(space, w_axis, ndim): if space.is_none(w_axis): return [True] * ndim diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit +from pypy.module.micronumpy.conversion_utils import byteorder_converter from pypy.module.micronumpy.constants import * @@ -34,10 +35,12 @@ return out class W_Dtype(W_Root): - 
_immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "float_type"] + _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", + "w_box_type", "byteorder", "size?", "float_type", + "fields?", "fieldnames?", "shape", "subdtype", "base"] def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE, - alternate_constructors=[], aliases=[], float_type=None, + size=1, alternate_constructors=[], aliases=[], float_type=None, fields=None, fieldnames=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num @@ -46,6 +49,7 @@ self.char = char self.w_box_type = w_box_type self.byteorder = byteorder + self.size = size self.alternate_constructors = alternate_constructors self.aliases = aliases self.float_type = float_type @@ -122,7 +126,7 @@ return self.byteorder in (NPY_NATIVE, NPY_NATBYTE) def get_size(self): - return self.itemtype.get_element_size() + return self.size * self.itemtype.get_element_size() def get_name(self): if self.char == 'S': @@ -136,7 +140,7 @@ return space.wrap("dtype('%s')" % self.get_name()) def descr_get_itemsize(self, space): - return space.wrap(self.itemtype.get_element_size()) + return space.wrap(self.get_size()) def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -196,7 +200,6 @@ self.fields = None else: self.fields = {} - ofs_and_items = [] size = 0 for key in space.listview(w_fields): value = space.getitem(w_fields, key) @@ -207,11 +210,11 @@ offset = space.int_w(space.getitem(value, space.wrap(1))) self.fields[space.str_w(key)] = offset, dtype - ofs_and_items.append((offset, dtype.itemtype)) - size += dtype.itemtype.get_element_size() + size += dtype.get_size() - self.itemtype = types.RecordType(ofs_and_items, size) - self.name = "void" + str(8 * self.itemtype.get_element_size()) + self.itemtype = types.RecordType() + self.size = size + self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): if 
self.fieldnames is None: @@ -263,7 +266,7 @@ w_class = space.type(self) kind = self.kind - elemsize = self.itemtype.get_element_size() + elemsize = self.get_size() builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) version = space.wrap(3) @@ -308,11 +311,23 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + newendian = byteorder_converter(space, new_order) + endian = self.byteorder + if endian != NPY_IGNORE: + if newendian == NPY_SWAP: + endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE + elif newendian != NPY_IGNORE: + endian = newendian + itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) + return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, + self.w_box_type, endian, size=self.size) + def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) fields = {} offset = 0 - ofs_and_items = [] fieldnames = [] for w_elem in lst_w: size = 1 @@ -329,13 +344,13 @@ raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - ofs_and_items.append((offset, subdtype.itemtype)) - offset += subdtype.itemtype.get_element_size() * size + offset += subdtype.get_size() * size fieldnames.append(fldname) - itemtype = types.RecordType(ofs_and_items, offset) - return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, "void" + str(8 * itemtype.get_element_size()), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, - fieldnames=fieldnames) + itemtype = types.RecordType() + return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * offset * itemtype.get_element_size()), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + fields=fields, fieldnames=fieldnames, size=offset) def dtype_from_dict(space, w_dict): raise 
OperationError(space.w_NotImplementedError, space.wrap( @@ -349,7 +364,8 @@ # w_align and w_copy are necessary for pickling cache = get_dtype_cache(space) - if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or space.len_w(w_shape) > 0): + if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or + space.len_w(w_shape) > 0): subdtype = descr__new__(space, w_subtype, w_dtype, w_align, w_copy) assert isinstance(subdtype, W_Dtype) size = 1 @@ -360,8 +376,11 @@ dim = space.int_w(w_dim) shape.append(dim) size *= dim - return W_Dtype(types.VoidType(subdtype.itemtype.get_element_size() * size), NPY_VOID, NPY_VOIDLTR, "void" + str(8 * subdtype.itemtype.get_element_size() * size), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), shape=shape, subdtype=subdtype) + return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * subdtype.get_size() * size), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, + size=subdtype.get_size() * size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -413,6 +432,7 @@ __reduce__ = interp2app(W_Dtype.descr_reduce), __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), @@ -450,17 +470,17 @@ size = 1 if char == NPY_STRINGLTR: - itemtype = types.StringType(size) + itemtype = types.StringType() basename = 'string' num = NPY_STRING w_box_type = space.gettypefor(interp_boxes.W_StringBox) elif char == NPY_VOIDLTR: - itemtype = types.VoidType(size) + itemtype = types.VoidType() basename = 'void' num = NPY_VOID w_box_type = space.gettypefor(interp_boxes.W_VoidBox) elif char == NPY_UNICODELTR: - itemtype = types.UnicodeType(size) + itemtype = types.UnicodeType() basename = 'unicode' num = NPY_UNICODE w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) @@ -468,27 +488,29 @@ assert False 
return W_Dtype(itemtype, num, char, - basename + str(8 * itemtype.get_element_size()), - char, w_box_type) + basename + str(8 * size * itemtype.get_element_size()), + char, w_box_type, size=size) def new_string_dtype(space, size): - itemtype = types.StringType(size) + itemtype = types.StringType() return W_Dtype( itemtype, + size=size, num=NPY_STRING, kind=NPY_STRINGLTR, - name='string' + str(8 * itemtype.get_element_size()), + name='string' + str(8 * size * itemtype.get_element_size()), char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): - itemtype = types.UnicodeType(size) + itemtype = types.UnicodeType() return W_Dtype( itemtype, + size=size, num=NPY_UNICODE, kind=NPY_UNICODELTR, - name='unicode' + str(8 * itemtype.get_element_size()), + name='unicode' + str(8 * size * itemtype.get_element_size()), char=NPY_UNICODELTR, w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -663,7 +685,8 @@ float_type = self.w_floatlongdtype, ) self.w_stringdtype = W_Dtype( - types.StringType(0), + types.StringType(), + size=0, num=NPY_STRING, kind=NPY_STRINGLTR, name='string', @@ -673,7 +696,8 @@ aliases=["str"], ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(0), + types.UnicodeType(), + size=0, num=NPY_UNICODE, kind=NPY_UNICODELTR, name='unicode', @@ -682,7 +706,8 @@ alternate_constructors=[space.w_unicode], ) self.w_voiddtype = W_Dtype( - types.VoidType(0), + types.VoidType(), + size=0, num=NPY_VOID, kind=NPY_VOIDLTR, name='void', @@ -750,7 +775,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.itemtype.get_element_size(), dtype) + (dtype.get_size(), dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -760,7 +785,7 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.itemtype.get_element_size()) + can_name = dtype.kind + str(dtype.get_size()) 
self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype @@ -830,7 +855,7 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itemsize = dtype.itemtype.get_element_size() + itemsize = dtype.get_size() items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itemsize * 8), # in case of changing diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -85,10 +85,10 @@ return space.wrap(len(self.get_shape())) def descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().itemtype.get_element_size()) + return space.wrap(self.get_dtype().get_size()) def descr_get_nbytes(self, space): - return space.wrap(self.get_size() * self.get_dtype().itemtype.get_element_size()) + return space.wrap(self.get_size() * self.get_dtype().get_size()) def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) @@ -625,10 +625,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "itemset not implemented yet")) - @unwrap_spec(neworder=str) - def descr_newbyteorder(self, space, neworder): - raise OperationError(space.w_NotImplementedError, space.wrap( - "newbyteorder not implemented yet")) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + return self.descr_view(space, + self.get_dtype().descr_newbyteorder(space, new_order)) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None)) @@ -1268,6 +1268,7 @@ diagonal = interp2app(W_NDimArray.descr_diagonal), trace = interp2app(W_NDimArray.descr_trace), view = interp2app(W_NDimArray.descr_view), + newbyteorder = interp2app(W_NDimArray.descr_newbyteorder), ctypes = GetSetProperty(W_NDimArray.descr_get_ctypes), # 
XXX unimplemented __array_interface__ = GetSetProperty(W_NDimArray.descr_array_iface), @@ -1340,7 +1341,7 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None or ( - dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1): + dtype.is_str_or_unicode() and dtype.get_size() < 1): for w_elem in elems_w: dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) @@ -1349,7 +1350,7 @@ if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.get_size() < 1: # promote S0 -> S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -475,8 +475,7 @@ if dt2.is_record_type(): return dt2 if dt1.is_str_or_unicode(): - if dt2.itemtype.get_element_size() >= \ - dt1.itemtype.get_element_size(): + if dt2.get_size() >= dt1.get_size(): return dt2 return dt1 return dt2 @@ -556,7 +555,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY_STRING: - if current_guess.itemtype.get_size() < space.len_w(w_obj): + if current_guess.get_size() < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -164,7 +164,7 @@ self.array = array self.offset = 0 self.dtype = array.dtype - self.skip = self.dtype.itemtype.get_element_size() + self.skip = self.dtype.get_size() self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- 
a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -288,6 +288,49 @@ assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype + def test_newbyteorder(self): + import numpypy as np + import sys + sys_is_le = sys.byteorder == 'little' + native_code = sys_is_le and '<' or '>' + swapped_code = sys_is_le and '>' or '<' + native_dt = np.dtype(native_code+'i2') + swapped_dt = np.dtype(swapped_code+'i2') + assert native_dt.newbyteorder('S') == swapped_dt + assert native_dt.newbyteorder() == swapped_dt + assert native_dt == swapped_dt.newbyteorder('S') + assert native_dt == swapped_dt.newbyteorder('=') + assert native_dt == swapped_dt.newbyteorder('N') + assert native_dt == native_dt.newbyteorder('|') + assert np.dtype('i2') == native_dt.newbyteorder('>') + assert np.dtype('>i2') == native_dt.newbyteorder('B') + + for t in [np.int_, np.float_]: + dt = np.dtype(t) + dt1 = dt.newbyteorder().newbyteorder() + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + assert dt.byteorder != dt1.byteorder + #assert hash(dt) == hash(dt1) + if dt == dt2: + assert dt.byteorder != dt2.byteorder + #assert hash(dt) == hash(dt2) + else: + assert dt.byteorder != dt3.byteorder + #assert hash(dt) == hash(dt3) + + exc = raises(ValueError, dt.newbyteorder, 'XX') + assert exc.value[0] == 'XX is an unrecognized byteorder' + + for t in [np.int_, np.float_]: + dt1 = np.dtype(t) + dt2 = dt1.newbyteorder() + s1 = np.array(123, dtype=dt1).tostring() + s2 = np.array(123, dtype=dt2).byteswap().tostring() + assert s1 == s2 + class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): import numpypy as numpy diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2928,6 +2928,15 @@ assert str(a.dtype) == '|S1' assert a == 
'x' + def test_newbyteorder(self): + import numpy as np + a = np.array([1, 2], dtype=np.int16) + b = a.newbyteorder() + assert (b == [256, 512]).all() + c = b.byteswap() + assert (c == [1, 2]).all() + assert (a == [1, 2]).all() + def test_pickle(self): from numpypy import dtype, array from cPickle import loads, dumps diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1591,17 +1591,8 @@ ComponentBoxType = interp_boxes.W_FloatLongBox class BaseStringType(BaseType): - _immutable_fields = ['size'] - - def __init__(self, size=0): - BaseType.__init__(self) - self.size = size - def get_element_size(self): - return self.size * rffi.sizeof(self.T) - - def get_size(self): - return self.size + return rffi.sizeof(self.T) def str_unary_op(func): specialize.argtype(1)(func) @@ -1636,13 +1627,13 @@ def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) - return self._store(arr.storage, i, offset, box) + size = min(arr.dtype.size, box.arr.size - box.ofs) + return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe - def _store(self, storage, i, offset, box): + def _store(self, storage, i, offset, box, size): assert isinstance(box, interp_boxes.W_StringBox) - # XXX simplify to range(box.dtype.get_size()) ? 
- for k in range(min(self.size, box.arr.size-box.ofs)): + for k in range(size): storage[k + offset + i] = box.arr.storage[k + box.ofs] def read(self, arr, i, offset, dtype=None): @@ -1725,17 +1716,17 @@ else: w_arg = box.descr_str(space) arg = space.str_w(space.str(w_arg)) - arr = VoidBoxStorage(self.size, mydtype) + arr = VoidBoxStorage(mydtype.size, mydtype) i = 0 - for i in range(min(len(arg), self.size)): + for i in range(min(len(arg), mydtype.size)): arr.storage[i] = arg[i] - for j in range(i + 1, self.size): + for j in range(i + 1, mydtype.size): arr.storage[j] = '\x00' - return interp_boxes.W_StringBox(arr, 0, arr.dtype) + return interp_boxes.W_StringBox(arr, 0, arr.dtype) def fill(self, storage, width, box, start, stop, offset): for i in xrange(start, stop, width): - self._store(storage, i, offset, box) + self._store(storage, i, offset, box, width) class UnicodeType(BaseStringType): T = lltype.UniChar @@ -1772,14 +1763,14 @@ ofs += size def coerce(self, space, dtype, w_items): - arr = VoidBoxStorage(self.size, dtype) + arr = VoidBoxStorage(dtype.get_size(), dtype) self._coerce(space, arr, 0, dtype, w_items, dtype.shape) return interp_boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(self.get_element_size()): + for k in range(box.arr.dtype.get_size()): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] def readarray(self, arr, i, offset, dtype=None): @@ -1793,15 +1784,9 @@ class RecordType(BaseType): T = lltype.Char - _immutable_fields_ = ['offsets_and_fields', 'size'] - - def __init__(self, offsets_and_fields, size): - BaseType.__init__(self) - self.offsets_and_fields = offsets_and_fields - self.size = size def get_element_size(self): - return self.size + return rffi.sizeof(self.T) def read(self, arr, i, offset, dtype=None): if dtype is None: @@ -1817,14 +1802,14 @@ if not space.issequence_w(w_item): raise OperationError(space.w_TypeError, space.wrap( "expected 
sequence")) - if len(self.offsets_and_fields) != space.len_w(w_item): + if len(dtype.fields) != space.len_w(w_item): raise OperationError(space.w_ValueError, space.wrap( "wrong length")) items_w = space.fixedview(w_item) - arr = VoidBoxStorage(self.size, dtype) + arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): - subdtype = dtype.fields[dtype.fieldnames[i]][1] - ofs, itemtype = self.offsets_and_fields[i] + ofs, subdtype = dtype.fields[dtype.fieldnames[i]] + itemtype = subdtype.itemtype w_item = items_w[i] w_box = itemtype.coerce(space, subdtype, w_item) itemtype.store(arr, 0, ofs, w_box) @@ -1833,7 +1818,7 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(self.get_element_size()): + for k in range(box.arr.dtype.get_size()): arr.storage[k + i] = box.arr.storage[k + box.ofs] @jit.unroll_safe @@ -1841,7 +1826,9 @@ assert isinstance(box, interp_boxes.W_VoidBox) pieces = ["("] first = True - for ofs, tp in self.offsets_and_fields: + for name in box.dtype.fieldnames: + ofs, subdtype = box.dtype.fields[name] + tp = subdtype.itemtype if first: first = False else: From noreply at buildbot.pypy.org Thu Nov 14 09:51:33 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:51:33 +0100 (CET) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20131114085133.D911F1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68026:c69c2692ce2e Date: 2013-11-14 03:50 -0500 http://bitbucket.org/pypy/pypy/changeset/c69c2692ce2e/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,6 @@ .. startrev: 4cd1bc8b3111 .. branch: release-2.2.x + +.. 
branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality From noreply at buildbot.pypy.org Thu Nov 14 09:57:50 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:57:50 +0100 (CET) Subject: [pypy-commit] pypy cleanup-numpypy-namespace: close merged branch Message-ID: <20131114085750.52B7F1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: cleanup-numpypy-namespace Changeset: r68027:9d9386541679 Date: 2013-11-14 03:54 -0500 http://bitbucket.org/pypy/pypy/changeset/9d9386541679/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 09:57:51 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:57:51 +0100 (CET) Subject: [pypy-commit] pypy numpypy-inplace-op: close merged branch Message-ID: <20131114085751.7E2381C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-inplace-op Changeset: r68028:3271bde53cb5 Date: 2013-11-14 03:55 -0500 http://bitbucket.org/pypy/pypy/changeset/3271bde53cb5/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 09:57:52 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 09:57:52 +0100 (CET) Subject: [pypy-commit] pypy numpypy_count_nonzero: close merged branch Message-ID: <20131114085752.B97771C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy_count_nonzero Changeset: r68029:3112b0eadb86 Date: 2013-11-14 03:56 -0500 http://bitbucket.org/pypy/pypy/changeset/3112b0eadb86/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:01:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 10:01:10 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Added tag release-2.2.0 for changeset 8eb5b5ac4bba Message-ID: <20131114090110.A059E1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r68030:6c8420e89b80 Date: 2013-11-14 10:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6c8420e89b80/ Log: Added 
tag release-2.2.0 for changeset 8eb5b5ac4bba diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,4 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +8eb5b5ac4bba366e7c7519c186bdfcf9c28c075d release-2.2.0 From noreply at buildbot.pypy.org Thu Nov 14 10:02:58 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:02:58 +0100 (CET) Subject: [pypy-commit] pypy dtype-isnative: close merged branch Message-ID: <20131114090258.2FB691C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: dtype-isnative Changeset: r68031:ebf84dd7f970 Date: 2013-11-14 04:00 -0500 http://bitbucket.org/pypy/pypy/changeset/ebf84dd7f970/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:02:59 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:02:59 +0100 (CET) Subject: [pypy-commit] pypy ndmin: close merged branch Message-ID: <20131114090259.64BED1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: ndmin Changeset: r68032:9d2bc73626af Date: 2013-11-14 04:01 -0500 http://bitbucket.org/pypy/pypy/changeset/9d2bc73626af/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:03:00 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:03:00 +0100 (CET) Subject: [pypy-commit] pypy array_equal: close merged branch Message-ID: <20131114090300.78ED51C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: array_equal Changeset: r68033:7630ca70381b Date: 2013-11-14 04:01 -0500 http://bitbucket.org/pypy/pypy/changeset/7630ca70381b/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:09:26 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:09:26 +0100 (CET) Subject: [pypy-commit] pypy pickle-dumps: close merged branch Message-ID: 
<20131114090926.1B1D51C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: pickle-dumps Changeset: r68034:3fec18025f0d Date: 2013-11-14 04:05 -0500 http://bitbucket.org/pypy/pypy/changeset/3fec18025f0d/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:09:27 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:09:27 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes: close merged branch Message-ID: <20131114090927.47C001C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: win32-fixes Changeset: r68035:45ecc4a42058 Date: 2013-11-14 04:06 -0500 http://bitbucket.org/pypy/pypy/changeset/45ecc4a42058/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:09:28 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:09:28 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.4-poll-fix: close merged branch Message-ID: <20131114090928.6EDF01C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.4-poll-fix Changeset: r68036:09e6c7429ebb Date: 2013-11-14 04:07 -0500 http://bitbucket.org/pypy/pypy/changeset/09e6c7429ebb/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:09:29 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:09:29 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.4-pwd-fix: close merged branch Message-ID: <20131114090929.ACBBD1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.4-pwd-fix Changeset: r68037:048031fbb4f9 Date: 2013-11-14 04:07 -0500 http://bitbucket.org/pypy/pypy/changeset/048031fbb4f9/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:09:30 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:09:30 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.4-fixed-io: close merged branch Message-ID: <20131114090930.D56DB1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.4-fixed-io 
Changeset: r68038:e6b76694838f Date: 2013-11-14 04:07 -0500 http://bitbucket.org/pypy/pypy/changeset/e6b76694838f/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:09:31 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:09:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.4-fixed-class: close merged branch Message-ID: <20131114090931.ECB291C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.4-fixed-class Changeset: r68039:3cbee0b9425c Date: 2013-11-14 04:07 -0500 http://bitbucket.org/pypy/pypy/changeset/3cbee0b9425c/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:10:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 10:10:52 +0100 (CET) Subject: [pypy-commit] jitviewer default: Added tag pypy-2.2 for changeset 15e03325a227 Message-ID: <20131114091052.8D7641C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r253:0d780832a697 Date: 2013-11-14 10:10 +0100 http://bitbucket.org/pypy/jitviewer/changeset/0d780832a697/ Log: Added tag pypy-2.2 for changeset 15e03325a227 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,2 +1,3 @@ 24adc3403cd8fdcd9e3f76f31a8dc2c145471002 release-0.1 13e1f8c97ca7c47f807ea93f44392c3f48102675 pypy-1.9 +15e03325a227c4c7145a56e841b6a8a3c59730ed pypy-2.2 From noreply at buildbot.pypy.org Thu Nov 14 10:10:56 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:10:56 +0100 (CET) Subject: [pypy-commit] pypy clean-up-remaining-pypy-rlib-refs: close merged branch Message-ID: <20131114091056.9B2421C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: clean-up-remaining-pypy-rlib-refs Changeset: r68040:67779c333eea Date: 2013-11-14 04:10 -0500 http://bitbucket.org/pypy/pypy/changeset/67779c333eea/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:10:57 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:10:57 
+0100 (CET) Subject: [pypy-commit] pypy coding-guide-update-rlib-refs: close merged branch Message-ID: <20131114091057.D53F51C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: coding-guide-update-rlib-refs Changeset: r68041:4230db2e2e1e Date: 2013-11-14 04:10 -0500 http://bitbucket.org/pypy/pypy/changeset/4230db2e2e1e/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:13:26 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:13:26 +0100 (CET) Subject: [pypy-commit] pypy unbreak-freebsd: close merged branch Message-ID: <20131114091326.A2D9E1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: unbreak-freebsd Changeset: r68042:d28e53d9c806 Date: 2013-11-14 04:11 -0500 http://bitbucket.org/pypy/pypy/changeset/d28e53d9c806/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:13:27 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:13:27 +0100 (CET) Subject: [pypy-commit] pypy unquote-faster: close merged branch Message-ID: <20131114091327.E48FF1C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: unquote-faster Changeset: r68043:c5943a5c41a9 Date: 2013-11-14 04:11 -0500 http://bitbucket.org/pypy/pypy/changeset/c5943a5c41a9/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:13:29 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:13:29 +0100 (CET) Subject: [pypy-commit] pypy urlparse-unquote-faster: close merged branch Message-ID: <20131114091329.301331C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: urlparse-unquote-faster Changeset: r68044:06e7251aaed4 Date: 2013-11-14 04:12 -0500 http://bitbucket.org/pypy/pypy/changeset/06e7251aaed4/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:19:01 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:19:01 +0100 (CET) Subject: [pypy-commit] pypy nobold-backtrace: close merged branch 
Message-ID: <20131114091901.6D1191C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: nobold-backtrace Changeset: r68045:be560078e413 Date: 2013-11-14 04:16 -0500 http://bitbucket.org/pypy/pypy/changeset/be560078e413/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:19:04 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:19:04 +0100 (CET) Subject: [pypy-commit] pypy remove-PYPY_NOT_MAIN_FILE: close merged branch Message-ID: <20131114091904.61DDD1C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: remove-PYPY_NOT_MAIN_FILE Changeset: r68046:0a8ce6278f68 Date: 2013-11-14 04:17 -0500 http://bitbucket.org/pypy/pypy/changeset/0a8ce6278f68/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:19:05 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:19:05 +0100 (CET) Subject: [pypy-commit] pypy distutils-cppldflags: close merged branch Message-ID: <20131114091905.8D9451C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: distutils-cppldflags Changeset: r68047:c28dc4ddd9be Date: 2013-11-14 04:17 -0500 http://bitbucket.org/pypy/pypy/changeset/c28dc4ddd9be/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:19:06 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:19:06 +0100 (CET) Subject: [pypy-commit] pypy improve-errors-again: close merged branch Message-ID: <20131114091906.A6C011C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: improve-errors-again Changeset: r68048:5f5e401edccc Date: 2013-11-14 04:18 -0500 http://bitbucket.org/pypy/pypy/changeset/5f5e401edccc/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:19:07 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:19:07 +0100 (CET) Subject: [pypy-commit] pypy improve-errors-again2: close merged branch Message-ID: <20131114091907.CB2111C0204@cobra.cs.uni-duesseldorf.de> Author: 
Brian Kearns Branch: improve-errors-again2 Changeset: r68049:fc8664971578 Date: 2013-11-14 04:18 -0500 http://bitbucket.org/pypy/pypy/changeset/fc8664971578/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:19:55 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:19:55 +0100 (CET) Subject: [pypy-commit] pypy popen2-removal: close merged branch Message-ID: <20131114091955.609621C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: popen2-removal Changeset: r68050:6538f38f63da Date: 2013-11-14 04:19 -0500 http://bitbucket.org/pypy/pypy/changeset/6538f38f63da/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:23:39 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:23:39 +0100 (CET) Subject: [pypy-commit] pypy package-tk: close merged branch Message-ID: <20131114092339.C5B501C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: package-tk Changeset: r68051:ca3a4cf272be Date: 2013-11-14 04:20 -0500 http://bitbucket.org/pypy/pypy/changeset/ca3a4cf272be/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:23:43 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:23:43 +0100 (CET) Subject: [pypy-commit] pypy enumerate-rstr: close merged branch Message-ID: <20131114092343.6821E1C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: enumerate-rstr Changeset: r68052:cd07f4c70232 Date: 2013-11-14 04:21 -0500 http://bitbucket.org/pypy/pypy/changeset/cd07f4c70232/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:23:44 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:23:44 +0100 (CET) Subject: [pypy-commit] pypy ndarray-ptp: close merged branch Message-ID: <20131114092344.8C4F41C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: ndarray-ptp Changeset: r68053:3a0298093f20 Date: 2013-11-14 04:21 -0500 
http://bitbucket.org/pypy/pypy/changeset/3a0298093f20/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:23:47 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:23:47 +0100 (CET) Subject: [pypy-commit] pypy rlib-doc-rpython-refs: close merged branch Message-ID: <20131114092347.37BF71C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: rlib-doc-rpython-refs Changeset: r68054:8412492e8e81 Date: 2013-11-14 04:22 -0500 http://bitbucket.org/pypy/pypy/changeset/8412492e8e81/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:25:46 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:25:46 +0100 (CET) Subject: [pypy-commit] pypy curses_fixes: close merged branch Message-ID: <20131114092546.56F3F1C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: curses_fixes Changeset: r68055:8e4f7f8df8e1 Date: 2013-11-14 04:23 -0500 http://bitbucket.org/pypy/pypy/changeset/8e4f7f8df8e1/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:25:47 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:25:47 +0100 (CET) Subject: [pypy-commit] pypy ctypes-byref: close merged branch Message-ID: <20131114092547.7A7011C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: ctypes-byref Changeset: r68056:dc20838c51f4 Date: 2013-11-14 04:24 -0500 http://bitbucket.org/pypy/pypy/changeset/dc20838c51f4/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:25:48 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:25:48 +0100 (CET) Subject: [pypy-commit] pypy statvfs_tests: close merged branch Message-ID: <20131114092548.91A2C1C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: statvfs_tests Changeset: r68057:61ce3b8b16b1 Date: 2013-11-14 04:24 -0500 http://bitbucket.org/pypy/pypy/changeset/61ce3b8b16b1/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 
10:25:49 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:25:49 +0100 (CET) Subject: [pypy-commit] pypy fix-jit-logs: close merged branch Message-ID: <20131114092549.C47C41C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-jit-logs Changeset: r68058:ea46bf3cc485 Date: 2013-11-14 04:24 -0500 http://bitbucket.org/pypy/pypy/changeset/ea46bf3cc485/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:25:50 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:25:50 +0100 (CET) Subject: [pypy-commit] pypy pycon2013-doc-fixes: close merged branch Message-ID: <20131114092550.DDCFD1C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: pycon2013-doc-fixes Changeset: r68059:3f776b761e09 Date: 2013-11-14 04:25 -0500 http://bitbucket.org/pypy/pypy/changeset/3f776b761e09/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:26:20 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:26:20 +0100 (CET) Subject: [pypy-commit] pypy zlib-mem-pressure: close merged branch Message-ID: <20131114092620.AD3301C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: zlib-mem-pressure Changeset: r68060:bfcb4d5e68e6 Date: 2013-11-14 04:25 -0500 http://bitbucket.org/pypy/pypy/changeset/bfcb4d5e68e6/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:26:47 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:26:47 +0100 (CET) Subject: [pypy-commit] pypy task-decorator: close merged branch Message-ID: <20131114092647.C5F571C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: task-decorator Changeset: r68061:18d6db6fccf8 Date: 2013-11-14 04:26 -0500 http://bitbucket.org/pypy/pypy/changeset/18d6db6fccf8/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:28:16 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:28:16 +0100 (CET) Subject: 
[pypy-commit] pypy py3k-struct: close merged branch Message-ID: <20131114092816.87E3C1C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k-struct Changeset: r68062:d7746d32bf9d Date: 2013-11-14 04:27 -0500 http://bitbucket.org/pypy/pypy/changeset/d7746d32bf9d/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:28:17 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:28:17 +0100 (CET) Subject: [pypy-commit] pypy py3k-subprocess-new-session: close merged branch Message-ID: <20131114092817.B90181C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k-subprocess-new-session Changeset: r68063:3b448254587e Date: 2013-11-14 04:27 -0500 http://bitbucket.org/pypy/pypy/changeset/3b448254587e/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:28:36 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:28:36 +0100 (CET) Subject: [pypy-commit] pypy pythoninspect-fix: close merged branch Message-ID: <20131114092836.A61491C0204@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: pythoninspect-fix Changeset: r68064:56a9bd9b2769 Date: 2013-11-14 04:27 -0500 http://bitbucket.org/pypy/pypy/changeset/56a9bd9b2769/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:31:57 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:31:57 +0100 (CET) Subject: [pypy-commit] pypy py3k-list-compr-or: close merged branch Message-ID: <20131114093157.247491C13B7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k-list-compr-or Changeset: r68065:659f78e9b5b6 Date: 2013-11-14 04:31 -0500 http://bitbucket.org/pypy/pypy/changeset/659f78e9b5b6/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:32:20 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:32:20 +0100 (CET) Subject: [pypy-commit] pypy bridge-logging: close merged branch Message-ID: 
<20131114093220.A43E71C13B7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: bridge-logging Changeset: r68066:83f0ba55bc1b Date: 2013-11-14 04:31 -0500 http://bitbucket.org/pypy/pypy/changeset/83f0ba55bc1b/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:32:42 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:32:42 +0100 (CET) Subject: [pypy-commit] pypy ssl_moving_write_buffer: close merged branch Message-ID: <20131114093242.5F5651C13B7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: ssl_moving_write_buffer Changeset: r68067:800741417787 Date: 2013-11-14 04:32 -0500 http://bitbucket.org/pypy/pypy/changeset/800741417787/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:34:31 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:34:31 +0100 (CET) Subject: [pypy-commit] pypy improved_ebnfparse_error: close merged branch Message-ID: <20131114093431.03D221C1309@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: improved_ebnfparse_error Changeset: r68068:a1f52d6ae34e Date: 2013-11-14 04:32 -0500 http://bitbucket.org/pypy/pypy/changeset/a1f52d6ae34e/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:34:32 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 10:34:32 +0100 (CET) Subject: [pypy-commit] pypy cpyext-PyThreadState_New: close merged branch Message-ID: <20131114093432.2F9B41C1309@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: cpyext-PyThreadState_New Changeset: r68069:87a5f161be81 Date: 2013-11-14 04:33 -0500 http://bitbucket.org/pypy/pypy/changeset/87a5f161be81/ Log: close merged branch From noreply at buildbot.pypy.org Thu Nov 14 10:48:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 10:48:53 +0100 (CET) Subject: [pypy-commit] pypy default: Issue #1633 Message-ID: <20131114094853.45A8C1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: 
r68070:8d1f6d47d417 Date: 2013-11-14 10:48 +0100 http://bitbucket.org/pypy/pypy/changeset/8d1f6d47d417/ Log: Issue #1633 Give a more sensible error message on int() or long() typeerrors. diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -119,9 +119,14 @@ if not ok: # otherwise, use the __int__() or the __trunc__() methods w_obj = w_value - if space.lookup(w_obj, '__int__') is None: + if space.lookup(w_obj, '__int__') is not None: + w_obj = space.int(w_obj) + elif space.lookup(w_obj, '__trunc__') is not None: w_obj = space.trunc(w_obj) - w_obj = space.int(w_obj) + else: + raise operationerrfmt(space.w_TypeError, + "int() argument must be a string or a number, not '%T'", + w_obj) # 'int(x)' should return what x.__int__() returned, which should # be an int or long or a subclass thereof. if space.is_w(w_inttype, space.w_int): diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter import typedef from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault,\ interpindirect2app @@ -39,13 +39,17 @@ if (space.lookup(w_obj, '__long__') is not None or space.lookup(w_obj, '__int__') is not None): w_obj = space.long(w_obj) - else: + elif space.lookup(w_obj, '__trunc__') is not None: w_obj = space.trunc(w_obj) # :-( blame CPython 2.7 if space.lookup(w_obj, '__long__') is not None: w_obj = space.long(w_obj) else: w_obj = space.int(w_obj) + else: + raise operationerrfmt(space.w_TypeError, + "long() argument must be a string or a number, not '%T'", + w_obj) bigint = space.bigint_w(w_obj) return newbigint(space, w_longtype, bigint) else: diff --git a/pypy/objspace/std/test/test_intobject.py 
b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -498,6 +498,11 @@ b = A(5).real assert type(b) is int + def test_int_error_msg(self): + e = raises(TypeError, int, []) + assert str(e.value) == ( + "int() argument must be a string or a number, not 'list'") + class AppTestIntOptimizedAdd(AppTestInt): spaceconfig = {"objspace.std.optimized_int_add": True} diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -341,3 +341,8 @@ assert int(long(3)) == long(3) assert int(A(13)) == 42 + + def test_long_error_msg(self): + e = raises(TypeError, long, []) + assert str(e.value) == ( + "long() argument must be a string or a number, not 'list'") From noreply at buildbot.pypy.org Thu Nov 14 13:23:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 13:23:55 +0100 (CET) Subject: [pypy-commit] pypy default: Thanks Larry Message-ID: <20131114122355.C51381C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68071:d13b9ffb9f1a Date: 2013-11-14 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/d13b9ffb9f1a/ Log: Thanks Larry diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -74,7 +74,7 @@ * Things that use ``sys.set_trace`` are now JITted (like coverage) -* JSON encoding used to be very fast, now decoding is as well +* JSON decoding is now very fast (JSON encoding was already very fast) * various buffer copying methods experience speedups (like list-of-ints to ``int[]`` buffer from cffi) From noreply at buildbot.pypy.org Thu Nov 14 13:29:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 13:29:08 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: Release 0.8 Message-ID: 
<20131114122908.A78721C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1417:2d6172481285 Date: 2013-11-14 13:28 +0100 http://bitbucket.org/cffi/cffi/changeset/2d6172481285/ Log: Release 0.8 From noreply at buildbot.pypy.org Thu Nov 14 13:30:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 13:30:21 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: MD5/SHA1 Message-ID: <20131114123021.201541C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1418:7f64d6952c56 Date: 2013-11-14 13:30 +0100 http://bitbucket.org/cffi/cffi/changeset/7f64d6952c56/ Log: MD5/SHA1 diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -92,9 +92,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: e61deb0515311bb42d5d58b9403bc923 - - SHA: ... + - SHA: 8332429193cb74d74f3347af180b448425d7d176 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Thu Nov 14 13:44:26 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 14 Nov 2013 13:44:26 +0100 (CET) Subject: [pypy-commit] buildbot default: move NUMPY_64 to allegro64 and add it to the nightly-2-00 scheduler Message-ID: <20131114124426.592571C0134@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r890:b7762c17e248 Date: 2013-11-14 13:43 +0100 http://bitbucket.org/pypy/buildbot/changeset/b7762c17e248/ Log: move NUMPY_64 to allegro64 and add it to the nightly-2-00 scheduler diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -225,6 +225,9 @@ Nightly("nightly-2-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) + NUMPY_64, # on allegro64, uses 1 core + # XXX maybe use a trigger instead? 
+ ], branch='default', hour=2, minute=0), Nightly("nightly-2-00-py3k", [ @@ -235,6 +238,7 @@ Nightly("nighly-ppc", [ JITONLYLINUXPPC64, # on gcc1 ], branch='ppc-jit-backend', hour=1, minute=0), + CustomForceScheduler('Force Scheduler', builderNames=[ PYPYBUILDBOT, @@ -437,11 +441,10 @@ 'category': 'openindiana32', }, {'name': NUMPY_64, - 'slavenames': ["tannit64"], + 'slavenames': ["allegro64"], 'builddir': NUMPY_64, 'factory': pypyNumpyCompatability, 'category': 'numpy', - 'locks': [TannitCPU.access('counting')], }, {'name': PYPYBUILDBOT, 'slavenames': ['cobra'], From noreply at buildbot.pypy.org Thu Nov 14 14:46:08 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 14 Nov 2013 14:46:08 +0100 (CET) Subject: [pypy-commit] buildbot default: move NUMPY_64 back to tannit64 Message-ID: <20131114134608.5600A1C0134@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r891:4d6e4e3813f2 Date: 2013-11-14 14:45 +0100 http://bitbucket.org/pypy/buildbot/changeset/4d6e4e3813f2/ Log: move NUMPY_64 back to tannit64 diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -223,10 +223,10 @@ ], branch='default', hour=0, minute=0), Nightly("nightly-2-00", [ + NUMPY_64, # on tannit64, uses 1 core, takes about 15min. + # XXX maybe use a trigger instead? JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - NUMPY_64, # on allegro64, uses 1 core - # XXX maybe use a trigger instead? 
], branch='default', hour=2, minute=0), @@ -441,10 +441,11 @@ 'category': 'openindiana32', }, {'name': NUMPY_64, - 'slavenames': ["allegro64"], + 'slavenames': ["tannit64"], 'builddir': NUMPY_64, 'factory': pypyNumpyCompatability, 'category': 'numpy', + 'locks': [TannitCPU.access('counting')], }, {'name': PYPYBUILDBOT, 'slavenames': ['cobra'], From noreply at buildbot.pypy.org Thu Nov 14 19:02:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:42 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7f1adb56a558 on branch bigint-with-int Message-ID: <20131114180242.8A3C21C1309@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68073:47504fdf3522 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/47504fdf3522/ Log: Merge closed head 7f1adb56a558 on branch bigint-with-int From noreply at buildbot.pypy.org Thu Nov 14 19:02:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:41 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head e57b90333d9a on branch default Message-ID: <20131114180241.6B2DA1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68072:87b139c5154a Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/87b139c5154a/ Log: Merge closed head e57b90333d9a on branch default diff --git a/.hgsubstate b/.hgsubstate new file mode 100644 From noreply at buildbot.pypy.org Thu Nov 14 19:02:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:43 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6a5f3429faad on branch optmodel-refactor Message-ID: <20131114180243.99E5D1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68074:d42e9c73d526 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/d42e9c73d526/ Log: Merge closed head 6a5f3429faad on branch 
optmodel-refactor From noreply at buildbot.pypy.org Thu Nov 14 19:02:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:44 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 31b7c9223f79 on branch rdict-experiments-2 Message-ID: <20131114180244.A68DE1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68075:ff0c1dd2927c Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/ff0c1dd2927c/ Log: Merge closed head 31b7c9223f79 on branch rdict-experiments-2 From noreply at buildbot.pypy.org Thu Nov 14 19:02:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:45 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9ad0c80d9cff on branch remove-numpypy Message-ID: <20131114180245.AABCF1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68076:a76ac33d372a Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/a76ac33d372a/ Log: Merge closed head 9ad0c80d9cff on branch remove-numpypy From noreply at buildbot.pypy.org Thu Nov 14 19:02:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:46 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head cf222adc939f on branch numpy-newbyteorder Message-ID: <20131114180246.BBEDC1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68077:333251320e76 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/333251320e76/ Log: Merge closed head cf222adc939f on branch numpy-newbyteorder From noreply at buildbot.pypy.org Thu Nov 14 19:02:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:47 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9d9386541679 on branch cleanup-numpypy-namespace Message-ID: <20131114180247.C49971C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
closed-branches Changeset: r68078:cacf7ddae3aa Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/cacf7ddae3aa/ Log: Merge closed head 9d9386541679 on branch cleanup-numpypy-namespace From noreply at buildbot.pypy.org Thu Nov 14 19:02:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:48 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3271bde53cb5 on branch numpypy-inplace-op Message-ID: <20131114180248.C620A1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68079:2f71dbdd9470 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2f71dbdd9470/ Log: Merge closed head 3271bde53cb5 on branch numpypy-inplace-op From noreply at buildbot.pypy.org Thu Nov 14 19:02:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:49 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3112b0eadb86 on branch numpypy_count_nonzero Message-ID: <20131114180249.CA4111C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68080:998704dde1e3 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/998704dde1e3/ Log: Merge closed head 3112b0eadb86 on branch numpypy_count_nonzero From noreply at buildbot.pypy.org Thu Nov 14 19:02:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:51 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head ebf84dd7f970 on branch dtype-isnative Message-ID: <20131114180251.6FA1E1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68081:be7a78744f04 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/be7a78744f04/ Log: Merge closed head ebf84dd7f970 on branch dtype-isnative From noreply at buildbot.pypy.org Thu Nov 14 19:02:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:52 +0100 (CET) Subject: [pypy-commit] 
pypy closed-branches: Merge closed head 9d2bc73626af on branch ndmin Message-ID: <20131114180252.7843B1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68082:642ab92e26cf Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/642ab92e26cf/ Log: Merge closed head 9d2bc73626af on branch ndmin From noreply at buildbot.pypy.org Thu Nov 14 19:02:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:53 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 7630ca70381b on branch array_equal Message-ID: <20131114180253.745281C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68083:7a010056514c Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/7a010056514c/ Log: Merge closed head 7630ca70381b on branch array_equal From noreply at buildbot.pypy.org Thu Nov 14 19:02:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:54 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3fec18025f0d on branch pickle-dumps Message-ID: <20131114180254.8AA301C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68084:b6d66231dba3 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/b6d66231dba3/ Log: Merge closed head 3fec18025f0d on branch pickle-dumps From noreply at buildbot.pypy.org Thu Nov 14 19:02:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:55 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 45ecc4a42058 on branch win32-fixes Message-ID: <20131114180255.899C81C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68085:6d4fbb25fa85 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6d4fbb25fa85/ Log: Merge closed head 45ecc4a42058 on branch win32-fixes From noreply at buildbot.pypy.org Thu Nov 14 19:02:56 2013 
From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:56 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 09e6c7429ebb on branch stdlib-2.7.4-poll-fix Message-ID: <20131114180256.91D2E1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68086:aef60966ce17 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/aef60966ce17/ Log: Merge closed head 09e6c7429ebb on branch stdlib-2.7.4-poll-fix From noreply at buildbot.pypy.org Thu Nov 14 19:02:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:57 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 048031fbb4f9 on branch stdlib-2.7.4-pwd-fix Message-ID: <20131114180257.97F2C1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68087:57969d79b842 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/57969d79b842/ Log: Merge closed head 048031fbb4f9 on branch stdlib-2.7.4-pwd-fix From noreply at buildbot.pypy.org Thu Nov 14 19:02:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:58 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head e6b76694838f on branch stdlib-2.7.4-fixed-io Message-ID: <20131114180258.9AD711C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68088:d435baaea47f Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/d435baaea47f/ Log: Merge closed head e6b76694838f on branch stdlib-2.7.4-fixed-io From noreply at buildbot.pypy.org Thu Nov 14 19:02:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:02:59 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3cbee0b9425c on branch stdlib-2.7.4-fixed-class Message-ID: <20131114180259.9A2801C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68089:8834bb5926f2 Date: 
2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/8834bb5926f2/ Log: Merge closed head 3cbee0b9425c on branch stdlib-2.7.4-fixed-class From noreply at buildbot.pypy.org Thu Nov 14 19:03:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:00 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 67779c333eea on branch clean-up-remaining-pypy-rlib-refs Message-ID: <20131114180300.98BE11C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68090:6b3ebcd26d8a Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6b3ebcd26d8a/ Log: Merge closed head 67779c333eea on branch clean-up-remaining-pypy- rlib-refs From noreply at buildbot.pypy.org Thu Nov 14 19:03:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:01 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4230db2e2e1e on branch coding-guide-update-rlib-refs Message-ID: <20131114180301.A593B1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68091:140a5b7e9376 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/140a5b7e9376/ Log: Merge closed head 4230db2e2e1e on branch coding-guide-update-rlib- refs From noreply at buildbot.pypy.org Thu Nov 14 19:03:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:02 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head d28e53d9c806 on branch unbreak-freebsd Message-ID: <20131114180302.A9B741C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68092:10757ec6ab02 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/10757ec6ab02/ Log: Merge closed head d28e53d9c806 on branch unbreak-freebsd From noreply at buildbot.pypy.org Thu Nov 14 19:03:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:03 +0100 (CET) Subject: [pypy-commit] pypy 
closed-branches: Merge closed head c5943a5c41a9 on branch unquote-faster Message-ID: <20131114180303.A92141C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68093:5394ef2bda3d Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/5394ef2bda3d/ Log: Merge closed head c5943a5c41a9 on branch unquote-faster From noreply at buildbot.pypy.org Thu Nov 14 19:03:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:04 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 06e7251aaed4 on branch urlparse-unquote-faster Message-ID: <20131114180304.AB6281C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68094:2f23727661fa Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2f23727661fa/ Log: Merge closed head 06e7251aaed4 on branch urlparse-unquote-faster From noreply at buildbot.pypy.org Thu Nov 14 19:03:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:05 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head be560078e413 on branch nobold-backtrace Message-ID: <20131114180305.A93951C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68095:98531cd04c3a Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/98531cd04c3a/ Log: Merge closed head be560078e413 on branch nobold-backtrace From noreply at buildbot.pypy.org Thu Nov 14 19:03:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:06 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 0a8ce6278f68 on branch remove-PYPY_NOT_MAIN_FILE Message-ID: <20131114180306.B59931C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68096:e6a2260ea787 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/e6a2260ea787/ Log: Merge closed head 0a8ce6278f68 on branch 
remove-PYPY_NOT_MAIN_FILE From noreply at buildbot.pypy.org Thu Nov 14 19:03:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:07 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head c28dc4ddd9be on branch distutils-cppldflags Message-ID: <20131114180307.C0D0B1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68097:47e35549c6d4 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/47e35549c6d4/ Log: Merge closed head c28dc4ddd9be on branch distutils-cppldflags From noreply at buildbot.pypy.org Thu Nov 14 19:03:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:08 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 5f5e401edccc on branch improve-errors-again Message-ID: <20131114180308.C1F0A1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68098:2051fc50063a Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2051fc50063a/ Log: Merge closed head 5f5e401edccc on branch improve-errors-again From noreply at buildbot.pypy.org Thu Nov 14 19:03:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:09 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head fc8664971578 on branch improve-errors-again2 Message-ID: <20131114180309.C12041C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68099:4edea7455192 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/4edea7455192/ Log: Merge closed head fc8664971578 on branch improve-errors-again2 From noreply at buildbot.pypy.org Thu Nov 14 19:03:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:10 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6538f38f63da on branch popen2-removal Message-ID: <20131114180310.C2F6F1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin 
Rigo Branch: closed-branches Changeset: r68100:39ccd08e0b72 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/39ccd08e0b72/ Log: Merge closed head 6538f38f63da on branch popen2-removal From noreply at buildbot.pypy.org Thu Nov 14 19:03:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:11 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head ca3a4cf272be on branch package-tk Message-ID: <20131114180311.BDF8B1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68101:9ac54ab0dbe0 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/9ac54ab0dbe0/ Log: Merge closed head ca3a4cf272be on branch package-tk From noreply at buildbot.pypy.org Thu Nov 14 19:03:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:12 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head cd07f4c70232 on branch enumerate-rstr Message-ID: <20131114180312.C6F061C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68102:4ed00f76a576 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/4ed00f76a576/ Log: Merge closed head cd07f4c70232 on branch enumerate-rstr From noreply at buildbot.pypy.org Thu Nov 14 19:03:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:13 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3a0298093f20 on branch ndarray-ptp Message-ID: <20131114180313.D311A1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68103:63866b92a1ff Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/63866b92a1ff/ Log: Merge closed head 3a0298093f20 on branch ndarray-ptp From noreply at buildbot.pypy.org Thu Nov 14 19:03:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:14 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed 
head 8412492e8e81 on branch rlib-doc-rpython-refs Message-ID: <20131114180314.D5FF61C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68104:fab5aaa5a12b Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/fab5aaa5a12b/ Log: Merge closed head 8412492e8e81 on branch rlib-doc-rpython-refs From noreply at buildbot.pypy.org Thu Nov 14 19:03:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:15 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 8e4f7f8df8e1 on branch curses_fixes Message-ID: <20131114180315.DEAF71C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68105:fb503a734353 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/fb503a734353/ Log: Merge closed head 8e4f7f8df8e1 on branch curses_fixes From noreply at buildbot.pypy.org Thu Nov 14 19:03:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:17 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head dc20838c51f4 on branch ctypes-byref Message-ID: <20131114180317.1321B1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68106:23c332c08fd1 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/23c332c08fd1/ Log: Merge closed head dc20838c51f4 on branch ctypes-byref From noreply at buildbot.pypy.org Thu Nov 14 19:03:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:18 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 61ce3b8b16b1 on branch statvfs_tests Message-ID: <20131114180318.1AD971C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68107:2c59a6c70918 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2c59a6c70918/ Log: Merge closed head 61ce3b8b16b1 on branch statvfs_tests From noreply at buildbot.pypy.org Thu Nov 14 19:03:19 2013 
From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:19 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head ea46bf3cc485 on branch fix-jit-logs Message-ID: <20131114180319.2A4961C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68108:bc5a4505f3cf Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/bc5a4505f3cf/ Log: Merge closed head ea46bf3cc485 on branch fix-jit-logs From noreply at buildbot.pypy.org Thu Nov 14 19:03:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:20 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3f776b761e09 on branch pycon2013-doc-fixes Message-ID: <20131114180320.2D8691C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68109:0cc3a7eb47cf Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/0cc3a7eb47cf/ Log: Merge closed head 3f776b761e09 on branch pycon2013-doc-fixes From noreply at buildbot.pypy.org Thu Nov 14 19:03:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:21 +0100 (CET) Subject: [pypy-commit] pypy default: Small updates Message-ID: <20131114180321.594031C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68110:e5911b74577f Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/e5911b74577f/ Log: Small updates diff --git a/pypy/tool/clean_old_branches.py b/pypy/tool/clean_old_branches.py --- a/pypy/tool/clean_old_branches.py +++ b/pypy/tool/clean_old_branches.py @@ -20,8 +20,8 @@ for line in result: if len(line) != 3: raise ValueError("'result' contains: %r" % line) - result = [(head, branch) for (head, branch, extra) in result - if branch not in ['', 'closed-branches'] and 'close' in extra] + result = [(head, branch or 'default') for (head, branch, extra) in result + if branch != 'closed-branches' and 'close=1' in extra] return result @@ -38,7 +38,8 @@ 
for head, branch in closed_heads: print '\t', head, '\t', branch print -print 'The branches listed above will be merged to "closed-branches".' +print 'The %d branches listed above will be merged to "closed-branches".' % ( + len(closed_heads),) print 'You need to run this script in a clean working copy where you' print 'don''t mind all files being removed.' print From noreply at buildbot.pypy.org Thu Nov 14 19:03:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:22 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head bfcb4d5e68e6 on branch zlib-mem-pressure Message-ID: <20131114180322.5B1001C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68111:9ab8a8397a95 Date: 2013-11-14 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/9ab8a8397a95/ Log: Merge closed head bfcb4d5e68e6 on branch zlib-mem-pressure From noreply at buildbot.pypy.org Thu Nov 14 19:03:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:23 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 18d6db6fccf8 on branch task-decorator Message-ID: <20131114180323.5E38B1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68112:aed7cfe91a46 Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/aed7cfe91a46/ Log: Merge closed head 18d6db6fccf8 on branch task-decorator From noreply at buildbot.pypy.org Thu Nov 14 19:03:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:24 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head d7746d32bf9d on branch py3k-struct Message-ID: <20131114180324.5A9C71C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68113:2ee19ed8f81f Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/2ee19ed8f81f/ Log: Merge closed head d7746d32bf9d on branch py3k-struct From noreply at 
buildbot.pypy.org Thu Nov 14 19:03:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:25 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3b448254587e on branch py3k-subprocess-new-session Message-ID: <20131114180325.5CA951C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68114:6230319bfb3f Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6230319bfb3f/ Log: Merge closed head 3b448254587e on branch py3k-subprocess-new-session From noreply at buildbot.pypy.org Thu Nov 14 19:03:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:26 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 56a9bd9b2769 on branch pythoninspect-fix Message-ID: <20131114180326.645CF1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68115:88350d86996d Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/88350d86996d/ Log: Merge closed head 56a9bd9b2769 on branch pythoninspect-fix From noreply at buildbot.pypy.org Thu Nov 14 19:03:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:27 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 659f78e9b5b6 on branch py3k-list-compr-or Message-ID: <20131114180327.77E891C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68116:5fe418c3c217 Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/5fe418c3c217/ Log: Merge closed head 659f78e9b5b6 on branch py3k-list-compr-or From noreply at buildbot.pypy.org Thu Nov 14 19:03:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:28 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 83f0ba55bc1b on branch bridge-logging Message-ID: <20131114180328.C95AD1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: 
r68117:7f78cebd672e Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/7f78cebd672e/ Log: Merge closed head 83f0ba55bc1b on branch bridge-logging From noreply at buildbot.pypy.org Thu Nov 14 19:03:29 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:29 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 800741417787 on branch ssl_moving_write_buffer Message-ID: <20131114180329.CB21F1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68118:582320f11d88 Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/582320f11d88/ Log: Merge closed head 800741417787 on branch ssl_moving_write_buffer From noreply at buildbot.pypy.org Thu Nov 14 19:03:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:30 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head a1f52d6ae34e on branch improved_ebnfparse_error Message-ID: <20131114180330.D1C8A1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68119:b72f0aea3f18 Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/b72f0aea3f18/ Log: Merge closed head a1f52d6ae34e on branch improved_ebnfparse_error From noreply at buildbot.pypy.org Thu Nov 14 19:03:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:31 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: Merge closed head 87a5f161be81 on branch cpyext-PyThreadState_New Message-ID: <20131114180331.D05BF1C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68120:c91c7524a1c9 Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/c91c7524a1c9/ Log: Merge closed head 87a5f161be81 on branch cpyext-PyThreadState_New From noreply at buildbot.pypy.org Thu Nov 14 19:03:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 19:03:32 +0100 (CET) Subject: [pypy-commit] 
pypy closed-branches: re-close this branch Message-ID: <20131114180332.D3CC61C12EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r68121:58cc9b35e02f Date: 2013-11-14 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/58cc9b35e02f/ Log: re-close this branch From noreply at buildbot.pypy.org Thu Nov 14 21:21:41 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 21:21:41 +0100 (CET) Subject: [pypy-commit] pypy default: rcomplex.c_pow does this Message-ID: <20131114202141.260A51C025A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68122:1e79df0ae309 Date: 2013-11-14 15:15 -0500 http://bitbucket.org/pypy/pypy/changeset/1e79df0ae309/ Log: rcomplex.c_pow does this diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1260,10 +1260,6 @@ @complex_binary_op def pow(self, v1, v2): - if v1[1] == 0 and v2[1] == 0 and v1[0] > 0: - return math.pow(v1[0], v2[0]), 0 - #if not rfloat.isfinite(v1[0]) or not rfloat.isfinite(v1[1]): - # return rfloat.NAN, rfloat.NAN try: return rcomplex.c_pow(v1, v2) except ZeroDivisionError: From noreply at buildbot.pypy.org Thu Nov 14 22:09:50 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 14 Nov 2013 22:09:50 +0100 (CET) Subject: [pypy-commit] pypy default: simplify by using newbyteorder here Message-ID: <20131114210950.A42191C025A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68123:9c0fabe8214b Date: 2013-11-14 16:09 -0500 http://bitbucket.org/pypy/pypy/changeset/9c0fabe8214b/ Log: simplify by using newbyteorder here diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -519,13 +519,7 @@ # by converting nonnative byte order. 
if self.is_scalar(): return space.wrap(0) - if not self.get_dtype().is_flexible_type(): - s = self.get_dtype().name - if not self.get_dtype().is_native(): - s = s[1:] - dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] - else: - dtype = self.get_dtype() + dtype = self.get_dtype().descr_newbyteorder(space, NPY_NATIVE) contig = self.implementation.astype(space, dtype) return contig.argsort(space, w_axis) From noreply at buildbot.pypy.org Thu Nov 14 23:20:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Nov 2013 23:20:27 +0100 (CET) Subject: [pypy-commit] pypy default: Rewrite the introduction, linking to SECCOMP and to Victor Stinner's Message-ID: <20131114222027.AB4BE1C025A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68124:471b407d8d5b Date: 2013-11-14 23:19 +0100 http://bitbucket.org/pypy/pypy/changeset/471b407d8d5b/ Log: Rewrite the introduction, linking to SECCOMP and to Victor Stinner's mail on python-dev about pysandbox. diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -4,26 +4,36 @@ Introduction ------------ -It is possible to compile a version of pypy-c that runs -fully "virtualized", i.e. where an external process controls all -input/output. Such a pypy-c is a secure sandbox: it is safe to run -any untrusted Python code with it. The Python code cannot see or -modify any local file except via interaction with the external -process. It is also impossible to do any other I/O or consume more -than some amount of RAM or CPU time or real time. This works with no -OS support at all - just ANSI C code generated in a careful way. It's -the kind of thing you could embed in a browser plug-in, for example -(it would be safe even if it wasn't run as a separate process, -actually). +PyPy offers sandboxing at a level similar to OS-level sandboxing (e.g. +SECCOMP_ on Linux), but implemented in a fully portable way. 
To use it, +a (regular, trusted) program launches a subprocess that is a special +sandboxed version of PyPy. This subprocess can run arbitrary untrusted +Python code, but all its input/output is serialized to a stdin/stdout +pipe instead of being directly performed. The outer process reads the +pipe and decides which commands are allowed or not (sandboxing), or even +reinterprets them differently (virtualization). A potential attacker +can have arbitrary code run in the subprocess, but cannot actually do +any input/output not controlled by the outer process. Additional +barriers are put to limit the amount of RAM and CPU time used. -For comparison, trying to plug CPython into a special virtualizing C -library is not only OS-specific, but unsafe, because one of the known -ways to segfault CPython could be used by an attacker to trick CPython -into issuing malicious system calls directly. The C code generated by +Note that this is very different from sandboxing at the Python language +level, i.e. placing restrictions on what kind of Python code the +attacker is allowed to run (why? read about pysandbox_). + +.. _SECCOMP: http://code.google.com/p/seccompsandbox/wiki/overview +.. _pysandbox: https://mail.python.org/pipermail/python-dev/2013-November/130132.html + +Another point of comparison: if we were instead to try to plug CPython +into a special virtualizing C library, we would get a result +that is not only OS-specific, but unsafe, because CPython can be +segfaulted (in many ways, all of them really, really obscure). +Given enough efforts, an attacker can turn almost any +segfault into a vulnerability. The C code generated by PyPy is not segfaultable, as long as our code generators are correct - -that's a lower number of lines of code to trust. For the paranoid, in -this case we also generate systematic run-time checks against buffer -overflows. +that's a lower number of lines of code to trust. 
For the paranoid, +PyPy translated with sandboxing also contains systematic run-time +checks (against buffer overflows for example) +that are normally only present in debugging versions. .. warning:: From noreply at buildbot.pypy.org Fri Nov 15 01:58:38 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 01:58:38 +0100 (CET) Subject: [pypy-commit] pypy default: put getitem/setitem/fill on array, not dtype Message-ID: <20131115005838.3764E1C030D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68125:bdb4b5232832 Date: 2013-11-14 19:44 -0500 http://bitbucket.org/pypy/pypy/changeset/bdb4b5232832/ Log: put getitem/setitem/fill on array, not dtype diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -36,10 +36,13 @@ return backstrides def getitem(self, index): - return self.dtype.getitem(self, index) + return self.dtype.itemtype.read(self, index, 0) + + def getitem_bool(self, index): + return self.dtype.itemtype.read_bool(self, index, 0) def setitem(self, index, value): - self.dtype.setitem(self, index, value) + self.dtype.itemtype.store(self, index, 0, value) def setslice(self, space, arr): impl = arr.implementation @@ -268,7 +271,7 @@ def create_dot_iter(self, shape, skip): r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), shape, skip) - return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) + return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] @@ -331,21 +334,24 @@ support.product(shape) > support.product(self.get_shape()): r = calculate_broadcast_strides(self.get_strides(), self.get_backstrides(), - self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - 
+ self.get_shape(), shape, + backward_broadcast) + return iter.MultiDimViewIterator(self, self.start, + r[0], r[1], shape) if not require_index: return iter.ConcreteArrayIterator(self) - else: - if len(self.get_shape()) == 1: - return iter.OneDimViewIterator(self, self.dtype, self.start, - self.get_strides(), self.get_shape()) - else: - return iter.MultiDimViewIterator(self, self.dtype, self.start, - self.get_strides(), self.get_backstrides(), self.get_shape()) + if len(self.get_shape()) == 1: + return iter.OneDimViewIterator(self, self.start, + self.get_strides(), + self.get_shape()) + return iter.MultiDimViewIterator(self, self.start, + self.get_strides(), + self.get_backstrides(), + self.get_shape()) def fill(self, box): - self.dtype.fill(self.storage, box, 0, self.size) + self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), + box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): strides, backstrides = support.calc_strides(new_shape, self.dtype, @@ -416,14 +422,16 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self.parent, self.dtype, - self.start, r[0], r[1], shape) + return iter.MultiDimViewIterator(self, self.start, + r[0], r[1], shape) if len(self.get_shape()) == 1: - return iter.OneDimViewIterator(self.parent, self.dtype, self.start, - self.get_strides(), self.get_shape()) - return iter.MultiDimViewIterator(self.parent, self.dtype, self.start, - self.get_strides(), - self.get_backstrides(), self.get_shape()) + return iter.OneDimViewIterator(self, self.start, + self.get_strides(), + self.get_shape()) + return iter.MultiDimViewIterator(self, self.start, + self.get_strides(), + self.get_backstrides(), + self.get_shape()) def set_shape(self, space, orig_array, new_shape): if len(self.get_shape()) < 2 or self.size == 0: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ 
b/pypy/module/micronumpy/interp_dtype.py @@ -81,19 +81,6 @@ def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) - def getitem(self, arr, i): - item = self.itemtype.read(arr, i, 0) - return item - - def getitem_bool(self, arr, i): - return self.itemtype.read_bool(arr, i, 0) - - def setitem(self, arr, i, box): - self.itemtype.store(arr, i, 0, box) - - def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) - def is_int_type(self): return (self.kind == NPY_SIGNEDLTR or self.kind == NPY_UNSIGNEDLTR or self.kind == NPY_GENBOOLLTR) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -159,23 +159,21 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] class ConcreteArrayIterator(base.BaseArrayIterator): - _immutable_fields_ = ['dtype', 'skip', 'size'] + _immutable_fields_ = ['array', 'skip', 'size'] def __init__(self, array): self.array = array self.offset = 0 - self.dtype = array.dtype - self.skip = self.dtype.get_size() + self.skip = array.dtype.get_size() self.size = array.size def setitem(self, elem): - self.dtype.setitem(self.array, self.offset, elem) + self.array.setitem(self.offset, elem) def getitem(self): - item = self.dtype.getitem(self.array, self.offset) - return item + return self.array.getitem(self.offset) def getitem_bool(self): - return self.dtype.getitem_bool(self.array, self.offset) + return self.array.getitem_bool(self.offset) def next(self): self.offset += self.skip @@ -190,12 +188,8 @@ self.offset %= self.size class OneDimViewIterator(ConcreteArrayIterator): - ''' The view iterator dtype can be different from the - array.dtype, this is what makes it a View - ''' - def __init__(self, array, dtype, start, strides, shape): + def __init__(self, array, start, strides, shape): self.array = array - self.dtype = dtype self.offset = start self.skip = strides[0] 
self.index = 0 @@ -219,13 +213,9 @@ return self.index class MultiDimViewIterator(ConcreteArrayIterator): - ''' The view iterator dtype can be different from the - array.dtype, this is what makes it a View - ''' - def __init__(self, array, dtype, start, strides, backstrides, shape): + def __init__(self, array, start, strides, backstrides, shape): self.indexes = [0] * len(shape) self.array = array - self.dtype = dtype self.shape = shape self.offset = start self.shapelen = len(shape) @@ -295,14 +285,12 @@ self.offset = array.start self.dim = dim self.array = array - self.dtype = array.dtype def setitem(self, elem): - self.dtype.setitem(self.array, self.offset, elem) + self.array.setitem(self.offset, elem) def getitem(self): - item = self.dtype.getitem(self.array, self.offset) - return item + return self.array.getitem(self.offset) @jit.unroll_safe def next(self): diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -13,7 +13,7 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, None, start, strides, backstrides, shape) + i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) i.next() i.next() i.next() @@ -31,7 +31,7 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, None, start, strides, backstrides, shape) + i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) i.next() i.next() i.next() @@ -52,7 +52,7 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, None, start, strides, backstrides, shape) + i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) i.next_skip_x(2) i.next_skip_x(2) 
i.next_skip_x(2) @@ -75,7 +75,7 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, None, start, strides, backstrides, shape) + i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) From noreply at buildbot.pypy.org Fri Nov 15 02:13:02 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 02:13:02 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix int/trunc behavior Message-ID: <20131115011302.78FDE1C030D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68126:6f5e3c868dc6 Date: 2013-11-14 20:12 -0500 http://bitbucket.org/pypy/pypy/changeset/6f5e3c868dc6/ Log: test/fix int/trunc behavior diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -119,14 +119,14 @@ if not ok: # otherwise, use the __int__() or the __trunc__() methods w_obj = w_value - if space.lookup(w_obj, '__int__') is not None: - w_obj = space.int(w_obj) - elif space.lookup(w_obj, '__trunc__') is not None: - w_obj = space.trunc(w_obj) - else: - raise operationerrfmt(space.w_TypeError, - "int() argument must be a string or a number, not '%T'", - w_obj) + if space.lookup(w_obj, '__int__') is None: + if space.lookup(w_obj, '__trunc__') is not None: + w_obj = space.trunc(w_obj) + else: + raise operationerrfmt(space.w_TypeError, + "int() argument must be a string or a number, not '%T'", + w_obj) + w_obj = space.int(w_obj) # 'int(x)' should return what x.__int__() returned, which should # be an int or long or a subclass thereof. 
if space.is_w(w_inttype, space.w_int): diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -7,7 +7,6 @@ class TestW_IntObject: - def _longshiftresult(self, x): """ calculate an overflowing shift """ n = 1 @@ -40,7 +39,7 @@ space = self.space assert isinstance(space.bigint_w(space.wrap(42)), rbigint) assert space.bigint_w(space.wrap(42)).eq(rbigint.fromint(42)) - + def test_repr(self): x = 1 f1 = iobj.W_IntObject(x) @@ -71,7 +70,7 @@ method = getattr(iobj, '%s__Int_Int' % op) myres = method(self.space, wx, wy) assert self.space.unwrap(myres) == res - + def test_add(self): x = 1 y = 2 @@ -283,8 +282,8 @@ result = iobj.hex__Int(self.space, f1) assert self.space.unwrap(result) == hex(x) + class AppTestInt: - def test_conjugate(self): assert (1).conjugate() == 1 assert (-1).conjugate() == -1 @@ -326,7 +325,7 @@ assert "42" == str(42) assert "42" == repr(42) raises(ValueError, int, '0x2A') - + def test_int_two_param(self): assert 42 == int('0x2A', 0) assert 42 == int('2A', 16) @@ -431,28 +430,28 @@ def test_special_int(self): class a(object): - def __int__(self): - self.ar = True + def __int__(self): + self.ar = True return None inst = a() - raises(TypeError, int, inst) + raises(TypeError, int, inst) assert inst.ar == True - class b(object): + class b(object): pass raises((AttributeError,TypeError), int, b()) def test_special_long(self): class a(object): - def __long__(self): - self.ar = True + def __long__(self): + self.ar = True return None inst = a() - raises(TypeError, long, inst) - assert inst.ar == True + raises(TypeError, long, inst) + assert inst.ar == True - class b(object): - pass + class b(object): + pass raises((AttributeError,TypeError), long, b()) def test_just_trunc(self): @@ -470,6 +469,15 @@ pass assert int(myotherint(21)) == 21 + def test_trunc_returns_non_int(self): + class Integral(object): + def __int__(self): 
+ return 42 + class TruncReturnsNonInt(object): + def __trunc__(self): + return Integral() + assert int(TruncReturnsNonInt()) == 42 + def test_getnewargs(self): assert 0 .__getnewargs__() == (0,) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -41,7 +41,6 @@ class AppTestLong: - def test_trunc(self): import math assert math.trunc(1L) == 1L @@ -311,7 +310,6 @@ assert (-1<<40).bit_length() == 41 assert ((2**31)-1).bit_length() == 31 - def test_negative_zero(self): x = eval("-0L") assert x == 0L From noreply at buildbot.pypy.org Fri Nov 15 02:28:17 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 02:28:17 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131115012817.EDB6F1C030D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68127:08c50e623f5f Date: 2013-11-14 20:25 -0500 http://bitbucket.org/pypy/pypy/changeset/08c50e623f5f/ Log: cleanup diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, - interp_attrproperty, interp_attrproperty_w) + interp_attrproperty, interp_attrproperty_w) from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong @@ -16,8 +16,9 @@ def decode_w_dtype(space, w_dtype): if space.is_none(w_dtype): return None - return space.interp_w(W_Dtype, - space.call_function(space.gettypefor(W_Dtype), w_dtype)) + return space.interp_w( + W_Dtype, space.call_function(space.gettypefor(W_Dtype), 
w_dtype)) + @jit.unroll_safe def dtype_agreement(space, w_arr_list, shape, out=None): @@ -34,6 +35,7 @@ out = base.W_NDimArray.from_shape(space, shape, dtype) return out + class W_Dtype(W_Root): _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "size?", "float_type", @@ -92,7 +94,7 @@ return self.kind == NPY_COMPLEXLTR def is_float_type(self): - return (self.kind == NPY_FLOATINGLTR or self.float_type is not None) + return self.kind == NPY_FLOATINGLTR or self.kind == NPY_COMPLEXLTR def is_bool_type(self): return self.kind == NPY_GENBOOLLTR @@ -311,6 +313,7 @@ return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, self.w_box_type, endian, size=self.size) + def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) fields = {} @@ -322,7 +325,7 @@ if space.len_w(w_elem) == 3: w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) if not base.issequence_w(space, w_shape): - w_shape = space.newtuple([w_shape,]) + w_shape = space.newtuple([w_shape]) else: w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) @@ -339,14 +342,17 @@ NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, fieldnames=fieldnames, size=offset) + def dtype_from_dict(space, w_dict): raise OperationError(space.w_NotImplementedError, space.wrap( "dtype from dict")) + def dtype_from_spec(space, name): raise OperationError(space.w_NotImplementedError, space.wrap( "dtype from spec")) + def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None): # w_align and w_copy are necessary for pickling cache = get_dtype_cache(space) @@ -384,7 +390,7 @@ if name[0] in 'VSUc' or name[0] in '<>=' and name[1] in 'VSUc': return variable_dtype(space, name) raise OperationError(space.w_TypeError, space.wrap( - "data type %s not understood" % name)) + "data type %s not understood" % name)) elif space.isinstance_w(w_dtype, space.w_list): 
return dtype_from_list(space, w_dtype) elif space.isinstance_w(w_dtype, space.w_tuple): @@ -478,6 +484,7 @@ basename + str(8 * size * itemtype.get_element_size()), char, w_box_type, size=size) + def new_string_dtype(space, size): itemtype = types.StringType() return W_Dtype( @@ -490,6 +497,7 @@ w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) + def new_unicode_dtype(space, size): itemtype = types.UnicodeType() return W_Dtype( @@ -558,7 +566,7 @@ name="int32", char=NPY_INTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int32Box), - ) + ) self.w_uint32dtype = W_Dtype( types.UInt32(), num=NPY_UINT, @@ -577,7 +585,7 @@ alternate_constructors=[space.w_int, space.gettypefor(interp_boxes.W_IntegerBox), space.gettypefor(interp_boxes.W_SignedIntegerBox), - ], + ], aliases=['int'], ) self.w_ulongdtype = W_Dtype( @@ -587,8 +595,8 @@ name="uint%d" % LONG_BIT, char=NPY_ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), - alternate_constructors=[ space.gettypefor(interp_boxes.W_UnsignedIntegerBox), - ], + alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), + ], aliases=['uint'], ) self.w_int64dtype = W_Dtype( @@ -628,7 +636,7 @@ w_box_type = space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), - ], + ], aliases=["float", "double"], ) self.w_floatlongdtype = W_Dtype( @@ -779,18 +787,18 @@ new_name = NPY_OPPBYTE + can_name itemtype = type(dtype.itemtype)(False) self.dtypes_by_name[new_name] = W_Dtype( - itemtype, - dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, - byteorder=NPY_OPPBYTE, float_type=dtype.float_type) + itemtype, dtype.num, dtype.kind, new_name, dtype.char, + dtype.w_box_type, byteorder=NPY_OPPBYTE, + float_type=dtype.float_type) if dtype.kind != dtype.char: can_name = dtype.char self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype new_name = NPY_OPPBYTE + can_name 
self.dtypes_by_name[new_name] = W_Dtype( - itemtype, - dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, - byteorder=NPY_OPPBYTE, float_type=dtype.float_type) + itemtype, dtype.num, dtype.kind, new_name, dtype.char, + dtype.w_box_type, byteorder=NPY_OPPBYTE, + float_type=dtype.float_type) for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype @@ -845,9 +853,11 @@ itemsize = dtype.get_size() items_w = [space.wrap(dtype.char), space.wrap(dtype.num), - space.wrap(itemsize * 8), # in case of changing + space.wrap(itemsize * 8), # in case of changing # number of bits per byte in the future - space.wrap(itemsize / (2 if dtype.kind == NPY_COMPLEXLTR else 1) or 1)] + space.wrap(itemsize / + (2 if dtype.kind == NPY_COMPLEXLTR else 1) + or 1)] if dtype.is_int_type(): if dtype.kind == NPY_GENBOOLLTR: w_maxobj = space.wrap(1) @@ -864,5 +874,6 @@ space.setitem(w_typeinfo, space.wrap(k), space.newtuple(items_w)) self.w_typeinfo = w_typeinfo + def get_dtype_cache(space): return space.fromcache(DtypeCache) From noreply at buildbot.pypy.org Fri Nov 15 07:05:42 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 07:05:42 +0100 (CET) Subject: [pypy-commit] pypy numpy-andrew-tests: close obsolete branch Message-ID: <20131115060542.A707E1D2330@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-andrew-tests Changeset: r68128:ef46b0734b9c Date: 2013-11-14 20:34 -0500 http://bitbucket.org/pypy/pypy/changeset/ef46b0734b9c/ Log: close obsolete branch From noreply at buildbot.pypy.org Fri Nov 15 07:05:43 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 07:05:43 +0100 (CET) Subject: [pypy-commit] pypy default: provide dtype.hasobject Message-ID: <20131115060543.CEC9D1D2332@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68129:bf64e1985de3 Date: 2013-11-15 01:05 -0500 http://bitbucket.org/pypy/pypy/changeset/bf64e1985de3/ Log: provide dtype.hasobject diff --git 
a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -224,6 +224,9 @@ raise break + def descr_get_hasobject(self, space): + return space.w_False + def descr_getitem(self, space, w_item): if self.fields is None: raise OperationError(space.w_KeyError, space.wrap( @@ -443,6 +446,7 @@ isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), + hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), ) W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -32,6 +32,7 @@ assert dtype('int8').name == 'int8' assert dtype(int).fields is None assert dtype(int).names is None + assert dtype(int).hasobject is False assert dtype(None) is dtype(float) From noreply at buildbot.pypy.org Fri Nov 15 07:43:21 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 07:43:21 +0100 (CET) Subject: [pypy-commit] pypy default: fix/enable this dtype test Message-ID: <20131115064321.67F2F1C010D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68130:df81eae287fa Date: 2013-11-15 01:19 -0500 http://bitbucket.org/pypy/pypy/changeset/df81eae287fa/ Log: fix/enable this dtype test diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -143,7 +143,7 @@ if basic == NPY_UNICODELTR: size >>= 2 endian = NPY_NATBYTE - elif size <= 1: + elif size // (self.size or 1) <= 1: endian = NPY_IGNORE else: endian = self.byteorder diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- 
a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -766,8 +766,8 @@ # strange assert dtype('string').str == '|S0' assert dtype('unicode').str == byteorder + 'U0' - #assert dtype(('string', 7)).str == '|S7' - #assert dtype(('unicode', 7)).str == ' Author: Brian Kearns Branch: Changeset: r68131:21243450f85a Date: 2013-11-15 01:22 -0500 http://bitbucket.org/pypy/pypy/changeset/21243450f85a/ Log: support dtype('|S#') diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -390,7 +390,7 @@ return cache.dtypes_by_name[name] except KeyError: pass - if name[0] in 'VSUc' or name[0] in '<>=' and name[1] in 'VSUc': + if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': return variable_dtype(space, name) raise OperationError(space.w_TypeError, space.wrap( "data type %s not understood" % name)) @@ -452,7 +452,7 @@ def variable_dtype(space, name): - if name[0] in '<>=': + if name[0] in '<>=|': name = name[1:] char = name[0] if len(name) == 1: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -828,13 +828,14 @@ from numpypy import dtype, str_ raises(TypeError, "dtype('Sx')") - d = dtype('S8') - assert d.itemsize == 8 - assert dtype(str) == dtype('S') - assert d.kind == 'S' - assert d.type is str_ - assert d.name == "string64" - assert d.num == 18 + for t in ['S8', '|S8', '=S8']: + d = dtype(t) + assert d.itemsize == 8 + assert dtype(str) == dtype('S') + assert d.kind == 'S' + assert d.type is str_ + assert d.name == "string64" + assert d.num == 18 for i in [1, 2, 3]: d = dtype('c%d' % i) assert d.itemsize == 1 From noreply at buildbot.pypy.org Fri Nov 15 07:43:23 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 15 Nov 2013 07:43:23 
+0100 (CET) Subject: [pypy-commit] pypy default: fix an array scalar indexing case Message-ID: <20131115064323.D81141C010D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68132:b6ff9f5a0ee0 Date: 2013-11-15 01:39 -0500 http://bitbucket.org/pypy/pypy/changeset/b6ff9f5a0ee0/ Log: fix an array scalar indexing case diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -123,6 +123,9 @@ ) def descr_getitem(self, space, _, w_idx): + if space.isinstance_w(w_idx, space.w_tuple): + if space.len_w(w_idx) == 0: + return self.get_scalar_value() raise OperationError(space.w_IndexError, space.wrap("scalars cannot be indexed")) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -661,13 +661,16 @@ assert (b[newaxis] == [[2, 3, 4]]).all() def test_scalar(self): - from numpypy import array, dtype + from numpypy import array, dtype, int64 a = array(3) raises(IndexError, "a[0]") raises(IndexError, "a[0] = 5") assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) + b = a[()] + assert type(b) is int64 + assert b == 3 def test_len(self): from numpypy import array From noreply at buildbot.pypy.org Fri Nov 15 08:52:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 15 Nov 2013 08:52:14 +0100 (CET) Subject: [pypy-commit] pypy default: An easy fix for a large part of test_os on Windows Message-ID: <20131115075214.E6DDF1C3014@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68133:aa90b6cfcff5 Date: 2013-11-15 08:51 +0100 http://bitbucket.org/pypy/pypy/changeset/aa90b6cfcff5/ Log: An easy fix for a large part of test_os on Windows diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- 
a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): From noreply at buildbot.pypy.org Fri Nov 15 09:42:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 15 Nov 2013 09:42:24 +0100 (CET) Subject: [pypy-commit] pypy default: Windows: attempt to fix stat() for long ints. Message-ID: <20131115084224.930D11C13FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68134:203930ef1f4f Date: 2013-11-15 09:41 +0100 http://bitbucket.org/pypy/pypy/changeset/203930ef1f4f/ Log: Windows: attempt to fix stat() for long ints. diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -466,16 +466,14 @@ def attribute_data_to_stat(info): st_mode = attributes_to_mode(info.c_dwFileAttributes) st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) - ctime, ctime_ns = FILE_TIME_to_time_t_nsec(info.c_ftCreationTime) - mtime, mtime_ns = FILE_TIME_to_time_t_nsec(info.c_ftLastWriteTime) - atime, atime_ns = FILE_TIME_to_time_t_nsec(info.c_ftLastAccessTime) + ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime) + mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime) + atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime) result = (st_mode, 0, 0, 0, 0, 0, st_size, - float(atime) + atime_ns * 1e-9, - float(mtime) + mtime_ns * 1e-9, - float(ctime) + ctime_ns * 1e-9) + atime, mtime, ctime) return make_stat_result(result) @@ -483,9 +481,9 @@ # similar to the one above st_mode = attributes_to_mode(info.c_dwFileAttributes) st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) - ctime, ctime_ns = FILE_TIME_to_time_t_nsec(info.c_ftCreationTime) - mtime, mtime_ns = 
FILE_TIME_to_time_t_nsec(info.c_ftLastWriteTime) - atime, atime_ns = FILE_TIME_to_time_t_nsec(info.c_ftLastAccessTime) + ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime) + mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime) + atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime) # specific to fstat() st_ino = make_longlong(info.c_nFileIndexHigh, info.c_nFileIndexLow) @@ -494,9 +492,7 @@ result = (st_mode, st_ino, 0, st_nlink, 0, 0, st_size, - atime + atime_ns * 1e-9, - mtime + mtime_ns * 1e-9, - ctime + ctime_ns * 1e-9) + atime, mtime, ctime) return make_stat_result(result) @@ -579,12 +575,10 @@ # Seconds between 1.1.1601 and 1.1.1970 secs_between_epochs = rffi.r_longlong(11644473600) -def FILE_TIME_to_time_t_nsec(filetime): +def FILE_TIME_to_time_t_float(filetime): ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) # FILETIME is in units of 100 nsec - nsec = (ft % 10000000) * 100 - time = (ft / 10000000) - secs_between_epochs - return intmask(time), intmask(nsec) + return float(ft) * (1.0 / 10000000.0) - secs_between_epochs def time_t_to_FILE_TIME(time, filetime): ft = rffi.r_longlong((time + secs_between_epochs) * 10000000) diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -76,6 +76,11 @@ t1 = 1159195039.25 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime + if sys.version_info < (2, 7): + py.test.skip('requires Python 2.7') + t1 = 5000000000.0 + compile(f, (str, float))(str(fname), t1) + assert t1 == os.stat(str(fname)).st_mtime def test__getfullpathname(): if os.name != 'nt': diff --git a/rpython/rtyper/module/test/test_ll_os_stat.py b/rpython/rtyper/module/test/test_ll_os_stat.py --- a/rpython/rtyper/module/test/test_ll_os_stat.py +++ b/rpython/rtyper/module/test/test_ll_os_stat.py @@ -1,4 +1,5 @@ from rpython.rtyper.module import ll_os_stat, 
ll_os +from rpython.tool.udir import udir import sys, os import py @@ -33,3 +34,13 @@ fstat = ll_os_stat.make_win32_stat_impl('fstat', ll_os.StringTraits()) stat = fstat(0) # stdout assert stat.st_mode != 0 + + def test_stat_large_number(self): + if sys.version_info < (2, 7): + py.test.skip('requires Python 2.7') + fname = udir.join('test_stat_large_number.txt') + fname.ensure() + t1 = 5000000000.0 + os.utime(str(fname), (t1, t1)) + stat = ll_os_stat.make_win32_stat_impl('stat', ll_os.StringTraits()) + assert stat(str(fname)).st_mtime == t1 From noreply at buildbot.pypy.org Sun Nov 17 22:54:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 17 Nov 2013 22:54:40 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: fix regression to test status in directory listing Message-ID: <20131117215440.E0C311C0173@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r895:75c380a6cecc Date: 2013-11-17 23:53 +0200 http://bitbucket.org/pypy/buildbot/changeset/75c380a6cecc/ Log: fix regression to test status in directory listing diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -156,6 +156,10 @@ '''template based, uses master/templates/directory.html ''' + def render(self, request): + self.status = request.site.buildbot_service.getStatus() + return DirectoryLister.render(self, request) + def _getFilesAndDirectories(self, directory): dirs, files = DirectoryLister._getFilesAndDirectories(self, directory) rowClasses = itertools.cycle(['odd', 'even']) From noreply at buildbot.pypy.org Sun Nov 17 22:54:42 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 17 Nov 2013 22:54:42 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: merge default into branch Message-ID: <20131117215442.169281C0225@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: add-header-to-nightly Changeset: r896:d69069fecc21 
Date: 2013-11-17 23:54 +0200 http://bitbucket.org/pypy/buildbot/changeset/d69069fecc21/ Log: merge default into branch diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -136,6 +136,7 @@ BUILDJITLINUXARM, BUILDLINUXARMHF_RASPBIAN, BUILDJITLINUXARMHF_RASPBIAN, + BUILDJITLINUXARMHF_RARING, ] schedulers = [ diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -857,6 +857,11 @@ #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? )) if host == 'tannit': + self.addStep(ShellCmd( + description="install jinja2", + command=['install/bin/pip', 'install', 'jinja2'], + workdir='./', + haltOnFailure=True,)) pypy_c_rel = 'install/bin/python' self.addStep(ShellCmd( description="measure numpy compatibility", diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -214,18 +214,21 @@ # other platforms #MACOSX32, # on minime JITWIN32, # on aurora - JITFREEBSD764, # on headless - JITFREEBSD864, # on ananke - JITFREEBSD964, # on exarkun's freebsd + #JITFREEBSD764, # on headless + #JITFREEBSD864, # on ananke + JITFREEBSD964, # on tavendo JITMACOSX64, # on xerxes # buildbot selftest PYPYBUILDBOT # on cobra ], branch='default', hour=0, minute=0), Nightly("nightly-2-00", [ + NUMPY_64, # on tannit64, uses 1 core, takes about 15min. + # XXX maybe use a trigger instead? 
JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - ], branch='default', hour=2, minute=0), + + ], branch=None, hour=2, minute=0), Nightly("nightly-2-00-py3k", [ LINUX64, # on allegro64, uses all cores @@ -235,6 +238,7 @@ Nightly("nighly-ppc", [ JITONLYLINUXPPC64, # on gcc1 ], branch='ppc-jit-backend', hour=1, minute=0), + CustomForceScheduler('Force Scheduler', builderNames=[ PYPYBUILDBOT, @@ -411,7 +415,7 @@ "category": 'freebsd64' }, {"name" : JITFREEBSD964, - "slavenames": ['hybridlogic'], + "slavenames": ['hybridlogic', 'tavendo-freebsd-9.2-amd64'], 'builddir' : JITFREEBSD964, 'factory' : pypyJITTranslatedTestFactoryFreeBSD, "category": 'freebsd64' From noreply at buildbot.pypy.org Mon Nov 18 03:21:26 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 18 Nov 2013 03:21:26 +0100 (CET) Subject: [pypy-commit] pypy default: close files in this test Message-ID: <20131118022126.7426E1C0175@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68197:00c1596ab741 Date: 2013-11-17 21:20 -0500 http://bitbucket.org/pypy/pypy/changeset/00c1596ab741/ Log: close files in this test diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() 
is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) From noreply at buildbot.pypy.org Mon Nov 18 04:22:54 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 18 Nov 2013 04:22:54 +0100 (CET) Subject: [pypy-commit] pypy default: fix this test for the bsd builder Message-ID: <20131118032254.610E11C0175@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68198:edd6b3a3df4d Date: 2013-11-17 22:22 -0500 http://bitbucket.org/pypy/pypy/changeset/edd6b3a3df4d/ Log: fix this test for the bsd builder diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -16,7 +16,7 @@ except KeyError: continue assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] + assert 'root' in g.gr_mem or g.gr_mem == [] assert g.gr_name == name assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) break From noreply at buildbot.pypy.org Mon Nov 18 09:28:49 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Nov 2013 09:28:49 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: after a transaction break, we need to check for invalidation again Message-ID: <20131118082849.2C00F1C07B6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68199:4fb6c8277d6c Date: 2013-11-18 09:28 +0100 http://bitbucket.org/pypy/pypy/changeset/4fb6c8277d6c/ Log: after a transaction break, we need to check for invalidation again diff 
--git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -262,12 +262,14 @@ opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return + if (opnum == rop.STM_TRANSACTION_BREAK or + opnum == rop.CALL_ASSEMBLER): + self._seen_guard_not_invalidated = False if (opnum == rop.CALL or opnum == rop.CALL_PURE or opnum == rop.COND_CALL or opnum == rop.CALL_MAY_FORCE or - opnum == rop.CALL_RELEASE_GIL or - opnum == rop.CALL_ASSEMBLER): + opnum == rop.CALL_RELEASE_GIL): if opnum == rop.CALL_ASSEMBLER: self._seen_guard_not_invalidated = False else: From noreply at buildbot.pypy.org Mon Nov 18 09:29:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Nov 2013 09:29:18 +0100 (CET) Subject: [pypy-commit] pypy default: Hack around a FreeBSD issue Message-ID: <20131118082918.B8F131C07B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68200:afb227c80804 Date: 2013-11-18 09:28 +0100 http://bitbucket.org/pypy/pypy/changeset/afb227c80804/ Log: Hack around a FreeBSD issue diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,6 +4,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -79,6 +80,38 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY + _t_opened = {} + + def t_dlopen(name): + # for direct execution: can't use the regular way on FreeBSD :-( + # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html + import ctypes + if name: + name = rffi.charp2str(name) + 
else: + name = None + try: + res = ctypes.cdll.LoadLibrary(name) + except OSError, e: + raise DLOpenError(str(e)) + h = rffi.cast(rffi.VOIDP, res._handle) + _t_opened[rffi.cast(rffi.LONG, h)] = res + return h + + def t_dlclose(handle): + _t_opened.pop(rffi.cast(rffi.LONG, handle)) + return rffi.cast(rffi.INT, 0) + + def t_dldym(handle, name): + import ctypes + lib = _t_opened[rffi.cast(rffi.LONG, handle)] + try: + symbol = lib[name] + except AttributeError: + raise KeyError(name) + res = ctypes.cast(symbol, ctypes.c_void_p) + return rffi.cast(rffi.VOIDP, res.value or 0) + def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -91,6 +124,8 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ + if not we_are_translated(): + return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -104,11 +139,16 @@ raise DLOpenError(err) return res - dlclose = c_dlclose + def dlclose(handle): + if not we_are_translated(): + return t_dlclose(handle) + return c_dlclose(handle) def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ + if not we_are_translated(): + return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,3 +21,4 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) + dlclose(lib) From noreply at buildbot.pypy.org Mon Nov 18 09:37:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Nov 2013 09:37:50 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test. 
Message-ID: <20131118083750.794501C07B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68201:c4dd095a23ea Date: 2013-11-18 09:37 +0100 http://bitbucket.org/pypy/pypy/changeset/c4dd095a23ea/ Log: Fix the test. diff --git a/rpython/jit/backend/x86/test/test_ztranslation_basic.py b/rpython/jit/backend/x86/test/test_ztranslation_basic.py --- a/rpython/jit/backend/x86/test/test_ztranslation_basic.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_basic.py @@ -1,11 +1,11 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest -from rpython.translator.translator import TranslationContext -from rpython.config.translationoption import DEFL_GC +from rpython.jit.backend.x86.arch import WORD class TestTranslationX86(TranslationTest): def _check_cbuilder(self, cbuilder): # We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. - assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra + if WORD == 4: + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra From noreply at buildbot.pypy.org Mon Nov 18 11:20:01 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Nov 2013 11:20:01 +0100 (CET) Subject: [pypy-commit] stmgc default: fix some debug assert Message-ID: <20131118102001.ADBAC1C00EC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r547:3d0358c80701 Date: 2013-11-08 15:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/3d0358c80701/ Log: fix some debug assert diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -84,7 +84,7 @@ } #ifdef _GC_DEBUG if (P != NULL) { - assert(P->h_tid != 0); + assert((P->h_tid & STM_USER_TID_MASK) == (tid & STM_USER_TID_MASK)); assert_cleared(((char *)P) + sizeof(revision_t), size - sizeof(revision_t)); } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -38,7 +38,7 @@ /* 
allocates a public reference to the object that will not be freed until stm_unregister_integer_address is - called on the result */ + called on the result (push roots!) */ intptr_t stm_allocate_public_integer_address(gcptr); void stm_unregister_integer_address(intptr_t); From noreply at buildbot.pypy.org Mon Nov 18 11:20:03 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Nov 2013 11:20:03 +0100 (CET) Subject: [pypy-commit] stmgc default: fix public ints (usage of public h_originals is not always right, they Message-ID: <20131118102003.1706D1C00EC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r548:68677625f2be Date: 2013-11-18 11:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/68677625f2be/ Log: fix public ints (usage of public h_originals is not always right, they need to be PREBUILT_ORIGINALs to be sure...) diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -278,8 +278,7 @@ gcptr obj = (gcptr)ip; assert(obj->h_tid & GCFLAG_PUBLIC); assert((obj->h_tid & GCFLAG_SMALLSTUB) - || (obj->h_original == 0 - || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + || (obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); check(obj); if (obj->h_revision & 2) check((gcptr)(obj->h_revision - 2)); @@ -304,7 +303,9 @@ if (td.num_public_ints == 0) return; + push_roots(); stm_unregister_integer_address(td.public_ints[--td.num_public_ints]); + pop_roots(); } gcptr read_barrier(gcptr p) diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -138,6 +138,7 @@ d->count_reads++; assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(d, P))); + assert(G->h_revision != 0); restart_all: if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) @@ -355,7 +356,7 @@ assert(P->h_tid & GCFLAG_PUBLIC); assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(thread_descriptor, P))); - + assert(P->h_revision != 0); revision_t v = ACCESS_ONCE(P->h_revision); assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */ 
@@ -660,6 +661,7 @@ gcptr stm_RepeatWriteBarrier(gcptr P) { + assert(P->h_revision != 0); assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(thread_descriptor, P))); @@ -673,6 +675,7 @@ gcptr stm_WriteBarrier(gcptr P) { + assert(P->h_revision != 0); assert(!(P->h_tid & GCFLAG_IMMUTABLE)); assert((P->h_tid & GCFLAG_STUB) || stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -65,7 +65,7 @@ intptr_t stm_allocate_public_integer_address(gcptr obj) -{ +{ /* push roots! */ struct tx_descriptor *d = thread_descriptor; gcptr stub; intptr_t result; @@ -75,6 +75,12 @@ During major collections, we visit them and update their references. */ + /* stm_register_integer_address needs to run in inevitable + transaction */ + stm_push_root(obj); + stm_become_inevitable("stm_allocate_public_integer_address"); + obj = stm_pop_root(); + /* we don't want to deal with young objs */ if (!(obj->h_tid & GCFLAG_OLD)) { stm_push_root(obj); @@ -93,9 +99,11 @@ orig = (gcptr)obj->h_original; } - if (orig->h_tid & GCFLAG_PUBLIC) { - /* the original is public, so we can take that as a non-movable - object to register */ + if ((orig->h_tid & (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) + == (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) { + /* public is not enough as public stubs may get replaced + by the protected object they point to, if they are in the + same thread (I think...) 
*/ result = (intptr_t)orig; } else { @@ -115,9 +123,11 @@ result = (intptr_t)stub; } spinlock_release(d->public_descriptor->collection_lock); + + dprintf(("allocate_public_int_adr(%p): %p", obj, (void*)result)); + stm_register_integer_address(result); - dprintf(("allocate_public_int_adr(%p): %p", obj, (void*)result)); return result; } diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -218,10 +218,11 @@ /***** registering of small stubs as integer addresses *****/ void stm_register_integer_address(intptr_t adr) -{ +{ /* needs to be inevitable! */ wlog_t *found; gcptr obj = (gcptr)adr; /* current limitations for 'adr': smallstub or h_original */ + assert(stm_active == 2); assert((obj->h_tid & GCFLAG_SMALLSTUB) || (obj->h_original == 0 || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); assert(obj->h_tid & GCFLAG_PUBLIC); @@ -241,7 +242,7 @@ } void stm_unregister_integer_address(intptr_t adr) -{ +{ /* push roots! */ wlog_t *found; gcptr obj = (gcptr)adr; @@ -249,6 +250,11 @@ || (obj->h_original == 0 || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); assert(obj->h_tid & GCFLAG_PUBLIC); + /* become inevitable because we would have to re-register them + on abort, but make sure only to re-register if not registered + in the same aborted transaction (XXX) */ + stm_become_inevitable("stm_unregister_integer_address()"); + stmgcpage_acquire_global_lock(); /* find and decrement refcount */ @@ -527,12 +533,18 @@ G2L_LOOP_FORWARD(registered_objs, item) { gcptr R = item->addr; assert(R->h_tid & GCFLAG_PUBLIC); - - if ((R->h_original == 0) || (R->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - /* the obj is an original and will therefore survive: */ - gcptr V = stmgcpage_visit(R); - assert(V == R); + + if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + /* already done by mark_prebuilt_roots */ + assert((R->h_tid & (GCFLAG_MARKED|GCFLAG_VISITED|GCFLAG_PUBLIC)) + == (GCFLAG_MARKED|GCFLAG_VISITED|GCFLAG_PUBLIC)); + continue; } + /* else if (R->h_original == 0) { */ + /* /\* the obj is 
an original and will therefore survive: *\/ */ + /* gcptr V = visit_public(R, NULL); */ + /* assert(V == R); */ + /* } */ else { assert(R->h_tid & GCFLAG_SMALLSTUB); /* only case for now */ /* make sure R stays valid: */ diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -40,7 +40,7 @@ not be freed until stm_unregister_integer_address is called on the result (push roots!) */ intptr_t stm_allocate_public_integer_address(gcptr); -void stm_unregister_integer_address(intptr_t); +void stm_unregister_integer_address(intptr_t); /* push roots too! */ /* returns a never changing hash for the object */ @@ -193,6 +193,7 @@ void stm_call_on_abort(void *key, void callback(void *)); /* only user currently is stm_allocate_public_integer_address() */ +/* needs to be in an inevitable transaction! */ void stm_register_integer_address(intptr_t); /* enter single-threaded mode. Used e.g. when patching assembler From noreply at buildbot.pypy.org Mon Nov 18 11:21:01 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Nov 2013 11:21:01 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: import stmgc Message-ID: <20131118102101.D4CB31C026D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68202:88a0f0cc46a5 Date: 2013-11-18 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/88a0f0cc46a5/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -139,6 +139,7 @@ d->count_reads++; assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(d, P))); + assert(G->h_revision != 0); restart_all: if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) @@ -356,7 +357,7 @@ assert(P->h_tid & GCFLAG_PUBLIC); assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(thread_descriptor, P))); - + assert(P->h_revision != 0); revision_t v = ACCESS_ONCE(P->h_revision); assert(IS_POINTER(v)); /* "is a pointer", "has a more 
recent revision" */ @@ -661,6 +662,7 @@ gcptr stm_RepeatWriteBarrier(gcptr P) { + assert(P->h_revision != 0); assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(thread_descriptor, P))); @@ -674,6 +676,7 @@ gcptr stm_WriteBarrier(gcptr P) { + assert(P->h_revision != 0); assert(!(P->h_tid & GCFLAG_IMMUTABLE)); assert((P->h_tid & GCFLAG_STUB) || stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -66,7 +66,7 @@ intptr_t stm_allocate_public_integer_address(gcptr obj) -{ +{ /* push roots! */ struct tx_descriptor *d = thread_descriptor; gcptr stub; intptr_t result; @@ -76,6 +76,12 @@ During major collections, we visit them and update their references. */ + /* stm_register_integer_address needs to run in inevitable + transaction */ + stm_push_root(obj); + stm_become_inevitable("stm_allocate_public_integer_address"); + obj = stm_pop_root(); + /* we don't want to deal with young objs */ if (!(obj->h_tid & GCFLAG_OLD)) { stm_push_root(obj); @@ -94,9 +100,11 @@ orig = (gcptr)obj->h_original; } - if (orig->h_tid & GCFLAG_PUBLIC) { - /* the original is public, so we can take that as a non-movable - object to register */ + if ((orig->h_tid & (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) + == (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) { + /* public is not enough as public stubs may get replaced + by the protected object they point to, if they are in the + same thread (I think...) 
*/ result = (intptr_t)orig; } else { @@ -116,9 +124,11 @@ result = (intptr_t)stub; } spinlock_release(d->public_descriptor->collection_lock); + + dprintf(("allocate_public_int_adr(%p): %p", obj, (void*)result)); + stm_register_integer_address(result); - dprintf(("allocate_public_int_adr(%p): %p", obj, (void*)result)); return result; } diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -219,10 +219,11 @@ /***** registering of small stubs as integer addresses *****/ void stm_register_integer_address(intptr_t adr) -{ +{ /* needs to be inevitable! */ wlog_t *found; gcptr obj = (gcptr)adr; /* current limitations for 'adr': smallstub or h_original */ + assert(stm_active == 2); assert((obj->h_tid & GCFLAG_SMALLSTUB) || (obj->h_original == 0 || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); assert(obj->h_tid & GCFLAG_PUBLIC); @@ -242,7 +243,7 @@ } void stm_unregister_integer_address(intptr_t adr) -{ +{ /* push roots! 
*/ wlog_t *found; gcptr obj = (gcptr)adr; @@ -250,6 +251,11 @@ || (obj->h_original == 0 || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); assert(obj->h_tid & GCFLAG_PUBLIC); + /* become inevitable because we would have to re-register them + on abort, but make sure only to re-register if not registered + in the same aborted transaction (XXX) */ + stm_become_inevitable("stm_unregister_integer_address()"); + stmgcpage_acquire_global_lock(); /* find and decrement refcount */ @@ -528,12 +534,18 @@ G2L_LOOP_FORWARD(registered_objs, item) { gcptr R = item->addr; assert(R->h_tid & GCFLAG_PUBLIC); - - if ((R->h_original == 0) || (R->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - /* the obj is an original and will therefore survive: */ - gcptr V = stmgcpage_visit(R); - assert(V == R); + + if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + /* already done by mark_prebuilt_roots */ + assert((R->h_tid & (GCFLAG_MARKED|GCFLAG_VISITED|GCFLAG_PUBLIC)) + == (GCFLAG_MARKED|GCFLAG_VISITED|GCFLAG_PUBLIC)); + continue; } + /* else if (R->h_original == 0) { */ + /* /\* the obj is an original and will therefore survive: *\/ */ + /* gcptr V = visit_public(R, NULL); */ + /* assert(V == R); */ + /* } */ else { assert(R->h_tid & GCFLAG_SMALLSTUB); /* only case for now */ /* make sure R stays valid: */ diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -85,7 +85,7 @@ } #ifdef _GC_DEBUG if (P != NULL) { - assert(P->h_tid != 0); + assert((P->h_tid & STM_USER_TID_MASK) == (tid & STM_USER_TID_MASK)); assert_cleared(((char *)P) + sizeof(revision_t), size - sizeof(revision_t)); } diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -14ac008e70a5 +68677625f2be diff --git a/rpython/translator/stm/src_stm/stmgc.h 
b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -39,9 +39,9 @@ /* allocates a public reference to the object that will not be freed until stm_unregister_integer_address is - called on the result */ + called on the result (push roots!) */ intptr_t stm_allocate_public_integer_address(gcptr); -void stm_unregister_integer_address(intptr_t); +void stm_unregister_integer_address(intptr_t); /* push roots too! */ /* returns a never changing hash for the object */ @@ -194,6 +194,7 @@ void stm_call_on_abort(void *key, void callback(void *)); /* only user currently is stm_allocate_public_integer_address() */ +/* needs to be in an inevitable transaction! */ void stm_register_integer_address(intptr_t); /* enter single-threaded mode. Used e.g. when patching assembler From noreply at buildbot.pypy.org Mon Nov 18 12:05:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Nov 2013 12:05:31 +0100 (CET) Subject: [pypy-commit] cffi default: Add a note to avoid people getting confused by the error message. Message-ID: <20131118110531.AFFD71C07B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1421:dd14c6808158 Date: 2013-11-18 12:05 +0100 http://bitbucket.org/cffi/cffi/changeset/dd14c6808158/ Log: Add a note to avoid people getting confused by the error message. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -48,6 +48,7 @@ try: compiler.compile(['c/check__thread.c']) except distutils.errors.CompileError: + print >> sys.stderr, "the above error message can be safely ignored;" print >> sys.stderr, "will not use '__thread' in the C code" else: define_macros.append(('USE__THREAD', None)) From noreply at buildbot.pypy.org Mon Nov 18 12:06:54 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Nov 2013 12:06:54 +0100 (CET) Subject: [pypy-commit] pypy windows-packaging: merge with default into branch Message-ID: <20131118110654.731D11C07B6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: windows-packaging Changeset: r68203:7d65e31ab264 Date: 2013-11-18 13:04 +0200 http://bitbucket.org/pypy/pypy/changeset/7d65e31ab264/ Log: merge with default into branch diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + 
msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -942,7 +942,8 @@ self.w_tmp_dir = self.space.wrap(tmp_dir) - foo_py = prefix.join('foo.py').write("pass") + foo_py = prefix.join('foo.py') + foo_py.write("pass") self.w_foo_py = self.space.wrap(str(foo_py)) def test_setup_bootstrap_path(self): diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -7,6 +7,9 @@ if os.name != 'posix': py.test.skip('termios module only available on unix') +if sys.platform.startswith('freebsd'): + raise Exception('XXX seems to hangs on FreeBSD9') + class TestTermios(object): def setup_class(cls): try: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1638,7 +1638,11 @@ #include #define alloca _alloca #else - #include + # ifdef __FreeBSD__ + # include + # else + # 
include + # endif #endif static int (*python_callback)(int how_many, int *values); static int c_callback(int how_many, ...) { diff --git a/pypy/module/test_lib_pypy/pyrepl/__init__.py b/pypy/module/test_lib_pypy/pyrepl/__init__.py --- a/pypy/module/test_lib_pypy/pyrepl/__init__.py +++ b/pypy/module/test_lib_pypy/pyrepl/__init__.py @@ -1,3 +1,6 @@ import sys import lib_pypy.pyrepl sys.modules['pyrepl'] = sys.modules['lib_pypy.pyrepl'] + +if sys.platform.startswith('freebsd'): + raise Exception('XXX seems to hangs on FreeBSD9') diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -16,7 +16,7 @@ except KeyError: continue assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] + assert 'root' in g.gr_mem or g.gr_mem == [] assert g.gr_name == name assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) break diff --git a/rpython/jit/backend/x86/support.py b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -5,12 +5,13 @@ if WORD == 4: extra = ['-DPYPY_X86_CHECK_SSE2'] + if sys.platform != 'win32': + extra += ['-msse2', '-mfpmath=sse'] else: - extra = [] + extra = [] # the -m options above are always on by default on x86-64 if sys.platform != 'win32': - extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra + extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, diff --git a/rpython/jit/backend/x86/test/test_ztranslation_basic.py b/rpython/jit/backend/x86/test/test_ztranslation_basic.py --- a/rpython/jit/backend/x86/test/test_ztranslation_basic.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_basic.py @@ -1,11 +1,11 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest 
-from rpython.translator.translator import TranslationContext -from rpython.config.translationoption import DEFL_GC +from rpython.jit.backend.x86.arch import WORD class TestTranslationX86(TranslationTest): def _check_cbuilder(self, cbuilder): # We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. - assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra + if WORD == 4: + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,6 +4,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -79,6 +80,38 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY + _t_opened = {} + + def t_dlopen(name): + # for direct execution: can't use the regular way on FreeBSD :-( + # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html + import ctypes + if name: + name = rffi.charp2str(name) + else: + name = None + try: + res = ctypes.cdll.LoadLibrary(name) + except OSError, e: + raise DLOpenError(str(e)) + h = rffi.cast(rffi.VOIDP, res._handle) + _t_opened[rffi.cast(rffi.LONG, h)] = res + return h + + def t_dlclose(handle): + _t_opened.pop(rffi.cast(rffi.LONG, handle)) + return rffi.cast(rffi.INT, 0) + + def t_dldym(handle, name): + import ctypes + lib = _t_opened[rffi.cast(rffi.LONG, handle)] + try: + symbol = lib[name] + except AttributeError: + raise KeyError(name) + res = ctypes.cast(symbol, ctypes.c_void_p) + return rffi.cast(rffi.VOIDP, res.value or 0) + def dlerror(): # XXX this would never work on top of ll2ctypes, 
because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -91,6 +124,8 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ + if not we_are_translated(): + return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -104,11 +139,16 @@ raise DLOpenError(err) return res - dlclose = c_dlclose + def dlclose(handle): + if not we_are_translated(): + return t_dlclose(handle) + return c_dlclose(handle) def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ + if not we_are_translated(): + return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,3 +21,4 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) + dlclose(lib) From noreply at buildbot.pypy.org Mon Nov 18 12:06:55 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Nov 2013 12:06:55 +0100 (CET) Subject: [pypy-commit] pypy windows-packaging: document branch Message-ID: <20131118110655.A74D81C07B6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: windows-packaging Changeset: r68204:d1aeb871790a Date: 2013-11-18 13:06 +0200 http://bitbucket.org/pypy/pypy/changeset/d1aeb871790a/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,3 +9,7 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality + +.. 
branch windows-packaging +Package tk/tcl runtime with win32 + From noreply at buildbot.pypy.org Mon Nov 18 12:06:56 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Nov 2013 12:06:56 +0100 (CET) Subject: [pypy-commit] pypy windows-packaging: close branch to be merged Message-ID: <20131118110656.BFA841C07B6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: windows-packaging Changeset: r68205:2d1eb388152e Date: 2013-11-18 13:06 +0200 http://bitbucket.org/pypy/pypy/changeset/2d1eb388152e/ Log: close branch to be merged From noreply at buildbot.pypy.org Mon Nov 18 12:06:58 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Nov 2013 12:06:58 +0100 (CET) Subject: [pypy-commit] pypy default: merge windows-packaging, which add the tcl/tk runtime to win32 for cpython compatability Message-ID: <20131118110658.0DC141C07B6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68206:4cc0b451e81f Date: 2013-11-18 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4cc0b451e81f/ Log: merge windows-packaging, which add the tcl/tk runtime to win32 for cpython compatability diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,10 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,3 +9,7 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality + +.. 
branch windows-packaging +Package tk/tcl runtime with win32 + diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -73,11 +73,11 @@ https://bitbucket.org/pypy/pypy/downloads/local.zip Then expand it into the base directory (base_dir) and modify your environment to reflect this:: - set PATH=\bin;%PATH% - set INCLUDE=\include;%INCLUDE% - set LIB=\lib;%LIB% + set PATH=\bin;\tcltk\bin;%PATH% + set INCLUDE=\include;\tcltk\include;%INCLUDE% + set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. Read on for more information. The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -109,11 +109,10 @@ The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://bzip.org/1.0.5/bzip2-1.0.5.tar.gz and extract it in -the base directory. Then compile:: - - cd bzip2-1.0.5 + svn export http://svn.python.org/projects/external/bzip2-1.0.6 + cd bzip2-1.0.6 nmake -f makefile.msc + copy bzip.dll \bzip.dll The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -122,8 +121,6 @@ wrapper is compiled when the module is imported for the first time. The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. - - The expat XML parser ~~~~~~~~~~~~~~~~~~~~ @@ -146,13 +143,33 @@ use the one distributed by ActiveState, or the one from cygwin. In both case the perl interpreter must be found on the PATH. -Get http://www.openssl.org/source/openssl-0.9.8k.tar.gz and extract it -in the base directory. Then compile:: - + svn export http://svn.python.org/projects/external/openssl-0.9.8y + cd openssl-0.9.8y perl Configure VC-WIN32 ms\do_ms.bat nmake -f ms\nt.mak install +TkInter module support +~~~~~~~~~~~~~~~~~~~~~~ + +Note that much of this is taken from the cpython build process. +Tkinter is imported via cffi, so the module is optional. 
To recreate the tcltk +directory found for the release script, create the dlls, libs, headers and +runtime by running:: + + svn export http://svn.python.org/projects/external/tcl-8.5.2.1 tcl85 + svn export http://svn.python.org/projects/external/tk-8.5.2.0 tk85 + cd tcl85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=0 INSTALLDIR=..\..\tcltk clean all + nmake -f makefile.vc DEBUG=0 INSTALLDIR=..\..\tcltk install + cd ..\..\tk85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 clean all + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 install + +Now you should have a tcktk\bin, tcltk\lib, and tcltk\include directory ready +for use. The release packaging script will pick up the tcltk runtime in the lib +directory and put it in the archive. + Using the mingw compiler ------------------------ diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,18 +67,22 @@ raise PyPyCNotFound( 'Bogus path: %r does not exist (see docstring for more info)' % (os.path.dirname(str(pypy_c)),)) + win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', + 'libeay32.dll', 'ssleay32.dll'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - if not withouttk: - try: - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) - except subprocess.CalledProcessError: - print >>sys.stderr, """Building Tk bindings failed. + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. 
You can either install Tk development headers package or add --without-tk option to skip packaging binary CFFI extension.""" - sys.exit(1) + sys.exit(1) + #Can the dependencies be found from cffi somehow? + win_extras += ['tcl85.dll', 'tk85.dll'] if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -101,9 +105,7 @@ # Can't rename a DLL: it is always called 'libpypy-c.dll' - for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll']: + for extra in win_extras: p = pypy_c.dirpath().join(extra) if not p.check(): p = py.path.local.sysfind(extra) @@ -122,6 +124,19 @@ # XXX users will complain that they cannot compile cpyext # modules for windows, has the lib moved or are there no # exported functions in the dll so no import library is created? + if not withouttk: + try: + p = pypy_c.dirpath().join('tcl85.dll') + if not p.check(): + p = py.path.local.sysfind('tcl85.dll') + tktcldir = p.dirpath().join('..').join('lib') + shutil.copytree(str(tktcldir), str(pypydir.join('tcl'))) + except WindowsError: + print >>sys.stderr, """Packaging Tk runtime failed. +tk85.dll and tcl85.dll found, expecting to find runtime in ..\\lib +directory next to the dlls, as per build instructions.""" + import traceback;traceback.print_exc() + sys.exit(1) # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. From noreply at buildbot.pypy.org Mon Nov 18 12:14:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Nov 2013 12:14:39 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Merge the non-doc parts of the 'windows-packaging' branch here. 
Message-ID: <20131118111439.D0B391C026D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r68207:1567dba349e6 Date: 2013-11-18 12:14 +0100 http://bitbucket.org/pypy/pypy/changeset/1567dba349e6/ Log: Merge the non-doc parts of the 'windows-packaging' branch here. diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,10 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,18 +67,22 @@ raise PyPyCNotFound( 'Bogus path: %r does not exist (see docstring for more info)' % (os.path.dirname(str(pypy_c)),)) + win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', + 'libeay32.dll', 'ssleay32.dll'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - if not withouttk: - try: - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) - except subprocess.CalledProcessError: - print >>sys.stderr, """Building Tk bindings failed. + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. You can either install Tk development headers package or add --without-tk option to skip packaging binary CFFI extension.""" - sys.exit(1) + sys.exit(1) + #Can the dependencies be found from cffi somehow? 
+ win_extras += ['tcl85.dll', 'tk85.dll'] if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -101,9 +105,7 @@ # Can't rename a DLL: it is always called 'libpypy-c.dll' - for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll']: + for extra in win_extras: p = pypy_c.dirpath().join(extra) if not p.check(): p = py.path.local.sysfind(extra) @@ -122,6 +124,19 @@ # XXX users will complain that they cannot compile cpyext # modules for windows, has the lib moved or are there no # exported functions in the dll so no import library is created? + if not withouttk: + try: + p = pypy_c.dirpath().join('tcl85.dll') + if not p.check(): + p = py.path.local.sysfind('tcl85.dll') + tktcldir = p.dirpath().join('..').join('lib') + shutil.copytree(str(tktcldir), str(pypydir.join('tcl'))) + except WindowsError: + print >>sys.stderr, """Packaging Tk runtime failed. +tk85.dll and tcl85.dll found, expecting to find runtime in ..\\lib +directory next to the dlls, as per build instructions.""" + import traceback;traceback.print_exc() + sys.exit(1) # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. 
From noreply at buildbot.pypy.org Mon Nov 18 13:42:08 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 18 Nov 2013 13:42:08 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: make it possible to disable suprocess with an env var Message-ID: <20131118124208.73AEE1C02C5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: release-2.2.x Changeset: r68208:1f5ff8d1ee6e Date: 2013-11-18 13:41 +0100 http://bitbucket.org/pypy/pypy/changeset/1f5ff8d1ee6e/ Log: make it possible to disable suprocess with an env var diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. '.pyc' From noreply at buildbot.pypy.org Mon Nov 18 14:26:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Nov 2013 14:26:12 +0100 (CET) Subject: [pypy-commit] cffi default: Bah. "print >> sys.stderr, ..." is syntactically valid on Python 3, Message-ID: <20131118132612.B82B41C00EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1422:c1c4583b0bd1 Date: 2013-11-18 14:26 +0100 http://bitbucket.org/cffi/cffi/changeset/c1c4583b0bd1/ Log: Bah. "print >> sys.stderr, ..." is syntactically valid on Python 3, but does nonsense. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -48,8 +48,8 @@ try: compiler.compile(['c/check__thread.c']) except distutils.errors.CompileError: - print >> sys.stderr, "the above error message can be safely ignored;" - print >> sys.stderr, "will not use '__thread' in the C code" + sys.stderr.write("the above error message can be safely ignored;\n") + sys.stderr.write("will not use '__thread' in the C code\n") else: define_macros.append(('USE__THREAD', None)) try: From noreply at buildbot.pypy.org Mon Nov 18 15:13:40 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Mon, 18 Nov 2013 15:13:40 +0100 (CET) Subject: [pypy-commit] pypy default: allow disable stripping/tk via env var Message-ID: <20131118141340.933931C00EC@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68209:679ca2fdefb6 Date: 2013-11-18 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/679ca2fdefb6/ Log: allow disable stripping/tk via env var diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -217,5 +217,11 @@ else: print_usage() + if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) From noreply at buildbot.pypy.org Mon Nov 18 15:13:41 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Mon, 18 Nov 2013 15:13:41 +0100 (CET) Subject: [pypy-commit] pypy default: allow disable strip/tk via env vars Message-ID: <20131118141341.C90851C026D@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68210:76bc9f2cd215 Date: 2013-11-18 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/76bc9f2cd215/ Log: allow disable strip/tk via env vars diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -232,5 +232,11 @@ else: 
print_usage() + if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) From noreply at buildbot.pypy.org Mon Nov 18 15:13:42 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Mon, 18 Nov 2013 15:13:42 +0100 (CET) Subject: [pypy-commit] pypy default: allow disable strip/tk via env vars Message-ID: <20131118141342.DDF1F1C00EC@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68211:0924956bd436 Date: 2013-11-18 14:11 +0100 http://bitbucket.org/pypy/pypy/changeset/0924956bd436/ Log: allow disable strip/tk via env vars From noreply at buildbot.pypy.org Mon Nov 18 15:13:43 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Mon, 18 Nov 2013 15:13:43 +0100 (CET) Subject: [pypy-commit] pypy default: FreeBSD 9.2 / Tcl/Tk 8.6 paths include and lib paths Message-ID: <20131118141343.EF0841C00EC@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68212:68fe795f0c67 Date: 2013-11-18 14:18 +0100 http://bitbucket.org/pypy/pypy/changeset/68fe795f0c67/ Log: FreeBSD 9.2 / Tcl/Tk 8.6 paths include and lib paths diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,10 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] elif sys.platform == 'win32': incdirs = [] linklibs = ['tcl85', 'tk85'] From noreply at buildbot.pypy.org Mon Nov 18 15:13:45 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Mon, 18 Nov 2013 15:13:45 +0100 (CET) Subject: [pypy-commit] pypy default: skip test that assume 
glibc specific, non-standard-C behavior Message-ID: <20131118141345.2FBA41C00EC@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68213:dc08f917cd4f Date: 2013-11-18 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/dc08f917cd4f/ Log: skip test that assume glibc specific, non-standard-C behavior diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1086,7 +1086,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1094,7 +1096,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1104,7 +1108,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = 
new_pointer_type(new_void_type()) ll = find_and_load_library('c') From noreply at buildbot.pypy.org Mon Nov 18 15:14:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Nov 2013 15:14:50 +0100 (CET) Subject: [pypy-commit] cffi default: Skip these tests on FreeBSD too Message-ID: <20131118141450.2A4821C00EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1423:8b8f7428bd31 Date: 2013-11-18 15:14 +0100 http://bitbucket.org/cffi/cffi/changeset/8b8f7428bd31/ Log: Skip these tests on FreeBSD too diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1097,7 +1097,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1105,7 +1107,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1115,7 +1119,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): 
py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') From noreply at buildbot.pypy.org Mon Nov 18 17:43:11 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 18 Nov 2013 17:43:11 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Revert Ufunc{1, 2}.call Message-ID: <20131118164311.0725F1C026D@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68214:d18c4175125e Date: 2013-11-18 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/d18c4175125e/ Log: Revert Ufunc{1,2}.call diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -343,17 +343,15 @@ w_obj.get_scalar_value().convert_to(calc_dtype)) if out is None: return w_val - if isinstance(out, W_NDimArray): - if out.is_scalar(): - out.set_scalar_value(w_val) - else: - out.fill(res_dtype.coerce(space, w_val)) - return self.call_prepare(space, out, w_obj, w_val) + if out.is_scalar(): + out.set_scalar_value(w_val) + else: + out.fill(res_dtype.coerce(space, w_val)) + return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) - w_result = loop.call1(space, shape, self.func, calc_dtype, res_dtype, + return loop.call1(space, shape, self.func, calc_dtype, res_dtype, w_obj, out) - return self.call_prepare(space, out, w_obj, w_result) class W_Ufunc2(W_Ufunc): @@ -423,11 +421,11 @@ promote_bools=self.promote_bools) if space.is_none(w_out): out = None - #elif not isinstance(w_out, W_NDimArray): - # raise OperationError(space.w_TypeError, space.wrap( - # 'output must be an array')) + elif not isinstance(w_out, W_NDimArray): + raise OperationError(space.w_TypeError, space.wrap( + 'output must be an array')) else: - out = convert_to_array(space, w_out) + out = w_out calc_dtype = out.get_dtype() if self.comparison_func: res_dtype = 
interp_dtype.get_dtype_cache(space).w_booldtype @@ -443,15 +441,14 @@ out.set_scalar_value(arr) else: out.fill(arr) - arr = out - # XXX handle array_priority - return self.call_prepare(space, out, w_lhs, arr) + else: + out = arr + return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) - w_result = loop.call2(space, new_shape, self.func, calc_dtype, + return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype, w_lhs, w_rhs, out) - # XXX handle array_priority - return self.call_prepare(space, out, w_lhs, w_result) + W_Ufunc.typedef = TypeDef("ufunc", __module__ = "numpypy", From noreply at buildbot.pypy.org Mon Nov 18 17:43:12 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 18 Nov 2013 17:43:12 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Move call_prepare to loop.py Message-ID: <20131118164312.272211C026D@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68215:145ff824d697 Date: 2013-11-18 17:42 +0100 http://bitbucket.org/pypy/pypy/changeset/145ff824d697/ Log: Move call_prepare to loop.py diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -256,38 +256,6 @@ return out return res - def call_prepare(self, space, w_out, w_obj, w_result): - if isinstance(w_out, W_NDimArray): - w_array = space.lookup(w_out, "__array_prepare__") - w_caller = w_out - else: - w_array = space.lookup(w_obj, "__array_prepare__") - w_caller = w_obj - if w_array: - w_retVal = space.get_and_call_function(w_array, w_caller, w_result, None) - if not isinstance(w_retVal, W_NDimArray) and \ - not isinstance(w_retVal, interp_boxes.Box): - raise OperationError(space.w_ValueError, - space.wrap( "__array_prepare__ must return an " - "ndarray or subclass thereof")) - 
if isinstance(w_result, interp_boxes.Box) or \ - w_result.is_scalar(): - if not isinstance(w_retVal, interp_boxes.Box) and not w_retVal.is_scalar(): - raise OperationError(space.w_TypeError, - space.wrap( "__array_prepare__ must return an " - "ndarray or subclass thereof which is " - "otherwise identical to its input")) - elif w_result.get_shape() != w_retVal.get_shape() or \ - w_result.implementation.get_strides() != \ - w_retVal.implementation.get_strides(): - raise OperationError(space.w_TypeError, - space.wrap( "__array_prepare__ must return an " - "ndarray or subclass thereof which is " - "otherwise identical to its input")) - return w_retVal - return w_result - - class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -19,6 +19,37 @@ reds = ['shape', 'w_lhs', 'w_rhs', 'out', 'left_iter', 'right_iter', 'out_iter']) +def call_prepare(self, space, w_out, w_obj, w_result): + if isinstance(w_out, W_NDimArray): + w_array = space.lookup(w_out, "__array_prepare__") + w_caller = w_out + else: + w_array = space.lookup(w_obj, "__array_prepare__") + w_caller = w_obj + if w_array: + w_retVal = space.get_and_call_function(w_array, w_caller, w_result, None) + if not isinstance(w_retVal, W_NDimArray) and \ + not isinstance(w_retVal, interp_boxes.Box): + raise OperationError(space.w_ValueError, + space.wrap( "__array_prepare__ must return an " + "ndarray or subclass thereof")) + if isinstance(w_result, interp_boxes.Box) or \ + w_result.is_scalar(): + if not isinstance(w_retVal, interp_boxes.Box) and not w_retVal.is_scalar(): + raise OperationError(space.w_TypeError, + space.wrap( "__array_prepare__ must return an " + "ndarray or subclass thereof which is " + "otherwise identical to its input")) + elif w_result.get_shape() != w_retVal.get_shape() or \ + w_result.implementation.get_strides() != \ + 
w_retVal.implementation.get_strides(): + raise OperationError(space.w_TypeError, + space.wrap( "__array_prepare__ must return an " + "ndarray or subclass thereof which is " + "otherwise identical to its input")) + return w_retVal + return w_result + def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: From noreply at buildbot.pypy.org Mon Nov 18 18:07:54 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Mon, 18 Nov 2013 18:07:54 +0100 (CET) Subject: [pypy-commit] pypy default: C header include paths .. once more Message-ID: <20131118170754.1F5D01C036B@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68216:b6c763cd5357 Date: 2013-11-18 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/b6c763cd5357/ Log: C header include paths .. once more diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -113,7 +113,7 @@ linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] elif sys.platform.startswith("freebsd"): - incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11'] + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] linklibs = ['tk86', 'tcl86'] libdirs = ['/usr/local/lib'] elif sys.platform == 'win32': From noreply at buildbot.pypy.org Mon Nov 18 18:31:59 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 18 Nov 2013 18:31:59 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Remove useless code Message-ID: <20131118173159.1AAB91C026D@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68217:5848d1c58c12 Date: 2013-11-18 18:30 +0100 http://bitbucket.org/pypy/pypy/changeset/5848d1c58c12/ Log: Remove useless code diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -173,7 +173,6 @@ shapelen = len(obj_shape) axis = unwrap_axis_arg(space, shapelen, w_axis) assert axis >= 0 - size = obj.get_size() dtype = interp_dtype.decode_w_dtype(space, dtype) if dtype is None: if self.comparison_func: From noreply at buildbot.pypy.org Mon Nov 18 18:32:00 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 18 Nov 2013 18:32:00 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Implement __array_prepare__ for non-scalar Message-ID: <20131118173200.4433B1C02D8@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68218:25afd81e613b Date: 2013-11-18 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/25afd81e613b/ Log: Implement __array_prepare__ for non-scalar diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -12,22 +12,12 @@ from pypy.module.micronumpy.iter import PureShapeIterator from pypy.module.micronumpy import constants from pypy.module.micronumpy.support import int_w +from pypy.module.micronumpy import interp_boxes -call2_driver = jit.JitDriver(name='numpy_call2', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) - -def call_prepare(self, space, w_out, w_obj, w_result): - if isinstance(w_out, W_NDimArray): - w_array = space.lookup(w_out, "__array_prepare__") - w_caller = w_out - else: - w_array = space.lookup(w_obj, "__array_prepare__") - w_caller = w_obj +def call_prepare(space, w_obj, w_result): + w_array = space.lookup(w_obj, "__array_prepare__") if w_array: - w_retVal = space.get_and_call_function(w_array, w_caller, w_result, None) + w_retVal = 
space.get_and_call_function(w_array, w_obj, w_result, None) if not isinstance(w_retVal, W_NDimArray) and \ not isinstance(w_retVal, interp_boxes.Box): raise OperationError(space.w_ValueError, @@ -50,6 +40,11 @@ return w_retVal return w_result +call2_driver = jit.JitDriver(name='numpy_call2', + greens = ['shapelen', 'func', 'calc_dtype', + 'res_dtype'], + reds = ['shape', 'w_lhs', 'w_rhs', 'out', + 'left_iter', 'right_iter', 'out_iter']) def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: @@ -78,6 +73,10 @@ if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) + out = call_prepare(space, w_lhs, out) + else: + out = call_prepare(space, out, out) + left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -107,6 +106,9 @@ def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) + out = call_prepare(space, w_obj, out) + else: + out = call_prepare(space, out, out) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -260,7 +260,7 @@ assert type(x) == ndarray assert a.called_wrap - def test___array_prepare__2arg(self): + def test___array_prepare__2arg_scalar(self): from numpypy import ndarray, array, add, ones class with_prepare(ndarray): def __array_prepare__(self, arr, context): @@ -287,7 +287,7 @@ assert x.called_prepare raises(TypeError, add, a, b, out=c) - def test___array_prepare__1arg(self): + def test___array_prepare__1arg_scalar(self): from numpypy import ndarray, array, log, ones class with_prepare(ndarray): def 
__array_prepare__(self, arr, context): @@ -316,6 +316,61 @@ assert x.called_prepare raises(TypeError, log, a, out=c) + def test___array_prepare__2arg_array(self): + from numpypy import ndarray, array, add, ones + class with_prepare(ndarray): + def __array_prepare__(self, arr, context): + retVal = array(arr).view(type=with_prepare) + retVal.called_prepare = True + return retVal + class with_prepare_fail(ndarray): + called_prepare = False + def __array_prepare__(self, arr, context): + return array(arr[0]).view(type=with_prepare) + a = array([1]) + b = array([1]).view(type=with_prepare) + x = add(a, a, out=b) + assert x == 2 + assert type(x) == with_prepare + assert x.called_prepare + b.called_prepare = False + a = ones((3, 2)).view(type=with_prepare) + b = ones((3, 2)) + c = ones((3, 2)).view(type=with_prepare_fail) + x = add(a, b, out=a) + assert (x == 2).all() + assert type(x) == with_prepare + assert x.called_prepare + raises(TypeError, add, a, b, out=c) + + def test___array_prepare__1arg_array(self): + from numpypy import ndarray, array, log, ones + class with_prepare(ndarray): + def __array_prepare__(self, arr, context): + retVal = array(arr).view(type=with_prepare) + retVal.called_prepare = True + return retVal + class with_prepare_fail(ndarray): + def __array_prepare__(self, arr, context): + return array(arr[0]).view(type=with_prepare) + a = array([1]) + b = array([1]).view(type=with_prepare) + print 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + x = log(a, out=b) + print 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + assert x == 0 + print 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + assert type(x) == with_prepare + assert x.called_prepare + x.called_prepare = False + a = ones((3, 2)).view(type=with_prepare) + b = ones((3, 2)) + c = ones((3, 2)).view(type=with_prepare_fail) + x = log(a) + assert (x == 0).all() + assert type(x) == with_prepare + assert x.called_prepare + raises(TypeError, log, a, out=c) def test___array_prepare__reduce(self): from numpypy import ndarray, array, sum, ones, add 
From noreply at buildbot.pypy.org Mon Nov 18 20:42:14 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 18 Nov 2013 20:42:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix indexing using array scalars Message-ID: <20131118194214.206E71C00EC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68219:f5dbd427526c Date: 2013-11-18 14:41 -0500 http://bitbucket.org/pypy/pypy/changeset/f5dbd427526c/ Log: fix indexing using array scalars diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -211,7 +211,15 @@ "field named %s not found" % idx)) return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): + space.isinstance_w(w_idx, space.w_slice)): + return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) + elif isinstance(w_idx, W_NDimArray) and \ + isinstance(w_idx.implementation, scalar.Scalar): + w_idx = w_idx.get_scalar_value().item(space) + if not space.isinstance_w(w_idx, space.w_int) and \ + not space.isinstance_w(w_idx, space.w_bool): + raise OperationError(space.w_IndexError, space.wrap( + "arrays used as indices must be of integer (or boolean) type")) return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -198,7 +198,8 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: return 
self.implementation.descr_getitem(space, self, w_idx) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1862,12 +1862,16 @@ raises(IndexError, "arange(10)[array([10])] = 3") raises(IndexError, "arange(10)[[-11]] = 3") - def test_bool_single_index(self): + def test_array_scalar_index(self): import numpypy as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - a[np.array(True)]; skip("broken") # check for crash but skip rest of test until correct + assert (a[np.array(0)] == a[0]).all() + assert (a[np.array(1)] == a[1]).all() + exc = raises(IndexError, "a[np.array(1.1)]") + assert exc.value.message == 'arrays used as indices must be of ' \ + 'integer (or boolean) type' assert (a[np.array(True)] == a[1]).all() assert (a[np.array(False)] == a[0]).all() From noreply at buildbot.pypy.org Mon Nov 18 20:49:13 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 18 Nov 2013 20:49:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix setitem using array scalars also Message-ID: <20131118194913.65FA21C00EC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68220:c16f3a1dbc73 Date: 2013-11-18 14:48 -0500 http://bitbucket.org/pypy/pypy/changeset/c16f3a1dbc73/ Log: fix setitem using array scalars also diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -213,7 +213,8 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1869,11 +1869,19 @@ [7, 8, 9]]) assert (a[np.array(0)] == a[0]).all() assert (a[np.array(1)] == a[1]).all() + assert (a[np.array(True)] == a[1]).all() + assert (a[np.array(False)] == a[0]).all() exc = raises(IndexError, "a[np.array(1.1)]") assert exc.value.message == 'arrays used as indices must be of ' \ 'integer (or boolean) type' - assert (a[np.array(True)] == a[1]).all() - assert (a[np.array(False)] == a[0]).all() + + a[np.array(1)] = a[2] + assert a[1][1] == 8 + a[np.array(True)] = a[0] + assert a[1][1] == 2 + exc = raises(IndexError, "a[np.array(1.1)] = a[2]") + assert exc.value.message == 'arrays used as indices must be of ' \ + 'integer (or boolean) type' def test_bool_array_index(self): from numpypy import arange, array From noreply at buildbot.pypy.org Mon Nov 18 21:04:43 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Nov 2013 21:04:43 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: add failing test, almost implement Message-ID: <20131118200443.1A9DE1C00EC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68221:889adc434d6e Date: 2013-11-18 22:03 +0200 http://bitbucket.org/pypy/pypy/changeset/889adc434d6e/ Log: add failing test, almost implement diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3099,6 +3099,17 @@ assert len(list(a[0])) == 2 + def test_3d_record(self): + from numpypy import dtype, array + dt = dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + s = str(a) + assert s.endswith("[('aaaa', 1.0, 
8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])]") + + def test_issue_1589(self): import numpypy as numpy c = numpy.array([[(1, 2, 'a'), (3, 4, 'b')], [(5, 6, 'c'), (7, 8, 'd')]], diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1789,6 +1789,25 @@ dtype.subdtype) return W_NDimArray(implementation) + def str_format(self, val): + # only called with the results of readarray() + from pypy.module.micronumpy.base import W_NDimArray + assert isinstance(val, W_NDimArray) + i = val.create_iter() + first = True + dtype = val.get_dtype() + s = StringBuilder() + s.append('[') + while not i.done(): + if first: + first = False + else: + s.append(', ') + s.append(dtype.itemtype.str_format(i.getitem())) + i.next() + s.append(']') + return s.build() + class RecordType(FlexibleType): T = lltype.Char @@ -1848,7 +1867,11 @@ first = False else: pieces.append(", ") - pieces.append(tp.str_format(tp.read(box.arr, box.ofs, ofs))) + if isinstance(tp, VoidType): + val = tp.readarray(box.arr, box.ofs, ofs, subdtype) + else: + val = tp.read(box.arr, box.ofs, ofs, subdtype) + pieces.append(tp.str_format(val)) pieces.append(")") return "".join(pieces) From noreply at buildbot.pypy.org Mon Nov 18 22:52:20 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 18 Nov 2013 22:52:20 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: cleanup ArgErr* Message-ID: <20131118215220.CA1DA1C026D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68222:ad17710e31c4 Date: 2013-10-30 06:25 +0000 http://bitbucket.org/pypy/pypy/changeset/ad17710e31c4/ Log: cleanup ArgErr* diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -105,10 +105,6 @@ # escape num_remainingkwds = len(keywords) for i, name in enumerate(keywords): - # If name was not encoded as a 
string, it could be None. In that - # case, it's definitely not going to be in the signature. - if name is None: - continue j = signature.find_argname(name) # if j == -1 nothing happens if j < input_argcount: @@ -122,8 +118,8 @@ if num_remainingkwds: if co_argcount == 0: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - raise ArgErrUnknownKwds(self.space, num_remainingkwds, keywords, - kwds_mapping, self.keyword_names_w) + raise ArgErrUnknownKwds(num_remainingkwds, keywords, + kwds_mapping) # check for missing arguments and fill them from the kwds, # or with defaults, if available @@ -298,31 +294,12 @@ class ArgErrUnknownKwds(ArgErr): - def __init__(self, space, num_remainingkwds, keywords, kwds_mapping, - keyword_names_w): + def __init__(self, num_remainingkwds, keywords, kwds_mapping): name = '' self.num_kwds = num_remainingkwds if num_remainingkwds == 1: - for i in range(len(keywords)): - if i not in kwds_mapping: - name = keywords[i] - if name is None: - # We'll assume it's unicode. Encode it. - # Careful, I *think* it should not be possible to - # get an IndexError here but you never know. - try: - if keyword_names_w is None: - raise IndexError - # note: negative-based indexing from the end - w_name = keyword_names_w[i - len(keywords)] - except IndexError: - name = '?' 
- else: - w_enc = space.wrap(space.sys.defaultencoding) - w_err = space.wrap("replace") - w_name = space.call_method(w_name, "encode", w_enc, - w_err) - name = space.str_w(w_name) + for name in keywords: + if name not in kwds_mapping: break self.kwd_name = name From noreply at buildbot.pypy.org Mon Nov 18 22:52:22 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 18 Nov 2013 22:52:22 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: make ArgsFT.keywords always be a list Message-ID: <20131118215222.1B1BD1C026D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68223:9c0b00eeb262 Date: 2013-10-31 03:42 +0000 http://bitbucket.org/pypy/pypy/changeset/9c0b00eeb262/ Log: make ArgsFT.keywords always be a list diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -11,9 +11,8 @@ assert w_starstararg is None assert isinstance(args_w, list) self.arguments_w = args_w - self.keywords = keywords - self.keywords_w = keywords_w - self.keyword_names_w = None + self.keywords = keywords or [] + self.keywords_w = keywords_w or [] def __repr__(self): """ NOT_RPYTHON """ @@ -71,7 +70,7 @@ args_w = self.positional_args num_args = len(args_w) - keywords = self.keywords or [] + keywords = self.keywords num_kwds = len(keywords) # put as many positional input arguments into place as available @@ -145,7 +144,7 @@ def unpack(self): "Return a ([w1,w2...], {'kw':w3...}) pair." 
- kwds_w = dict(zip(self.keywords, self.keywords_w)) if self.keywords else {} + kwds_w = dict(zip(self.keywords, self.keywords_w)) return self.positional_args, kwds_w def match_signature(self, signature, defaults_w): @@ -177,9 +176,8 @@ assert len(data_w) >= need_cnt args_w = data_w[:need_cnt] _kwds_w = dict(zip(argnames[need_cnt:], data_w[need_cnt:])) - keywords = self.keywords or [] - keywords_w = [_kwds_w[key] for key in keywords] - return ArgumentsForTranslation(args_w, keywords, keywords_w) + keywords_w = [_kwds_w[key] for key in self.keywords] + return ArgumentsForTranslation(args_w, self.keywords, keywords_w) @classmethod def fromshape(cls, (shape_cnt, shape_keys, shape_star, shape_stst), data_w): @@ -210,15 +208,11 @@ return (shape_cnt, shape_keys, shape_star, shape_stst), data_w def _rawshape(self, nextra=0): - shape_cnt = len(self.arguments_w) + nextra # Number of positional args - if self.keywords: - shape_keys = self.keywords[:] # List of keywords (strings) - shape_keys.sort() - else: - shape_keys = [] + shape_cnt = len(self.arguments_w) + nextra # Number of positional args + shape_keys = tuple(sorted(self.keywords)) shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = self.w_starstararg is not None # Flag: presence of **kwds - return shape_cnt, tuple(shape_keys), shape_star, shape_stst # shape_keys are sorted + shape_stst = self.w_starstararg is not None # Flag: presence of **kwds + return shape_cnt, shape_keys, shape_star, shape_stst def rawshape(args, nextra=0): diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -23,8 +23,8 @@ args1 = args.prepend("thingy") assert args1 is not args assert args1.arguments_w == ["thingy", "0"] - assert args1.keywords is args.keywords - assert args1.keywords_w is args.keywords_w + assert args1.keywords == args.keywords + assert args1.keywords_w == 
args.keywords_w def test_fixedunpacked(self): args = MockArgs([], ["k"], [1]) From noreply at buildbot.pypy.org Mon Nov 18 22:52:23 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 18 Nov 2013 22:52:23 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: Turn ArgsFT.keywords into a dict Message-ID: <20131118215223.50F171C026D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68224:ec91da17a267 Date: 2013-10-31 18:40 +0000 http://bitbucket.org/pypy/pypy/changeset/ec91da17a267/ Log: Turn ArgsFT.keywords into a dict diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -5,14 +5,13 @@ class ArgumentsForTranslation(object): w_starstararg = None - def __init__(self, args_w, keywords=None, keywords_w=None, + def __init__(self, args_w, keywords=None, w_stararg=None, w_starstararg=None): self.w_stararg = w_stararg assert w_starstararg is None assert isinstance(args_w, list) self.arguments_w = args_w - self.keywords = keywords or [] - self.keywords_w = keywords_w or [] + self.keywords = keywords or {} def __repr__(self): """ NOT_RPYTHON """ @@ -20,8 +19,7 @@ if not self.keywords: return '%s(%s)' % (name, self.arguments_w,) else: - return '%s(%s, %s, %s)' % (name, self.arguments_w, - self.keywords, self.keywords_w) + return '%s(%s, %s)' % (name, self.arguments_w, self.keywords) @property def positional_args(self): @@ -52,12 +50,12 @@ def prepend(self, w_firstarg): # used often "Return a new Arguments with a new argument inserted first." 
return ArgumentsForTranslation([w_firstarg] + self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, + self.keywords, self.w_stararg, self.w_starstararg) def copy(self): return ArgumentsForTranslation(self.arguments_w, self.keywords, - self.keywords_w, self.w_stararg, self.w_starstararg) + self.w_stararg, self.w_starstararg) def _match_signature(self, scope_w, signature, defaults_w=None): """Parse args and kwargs according to the signature of a code object, @@ -92,18 +90,17 @@ # handle keyword arguments num_remainingkwds = 0 - keywords_w = self.keywords_w kwds_mapping = None if num_kwds: # kwds_mapping maps target indexes in the scope (minus input_argcount) - # to positions in the keywords_w list - kwds_mapping = [-1] * (co_argcount - input_argcount) + # to keyword names + kwds_mapping = [] # match the keywords given at the call site to the argument names # the called function takes # this function must not take a scope_w, to make the scope not # escape num_remainingkwds = len(keywords) - for i, name in enumerate(keywords): + for name in keywords: j = signature.find_argname(name) # if j == -1 nothing happens if j < input_argcount: @@ -111,7 +108,7 @@ if j >= 0: raise ArgErrMultipleValues(name) else: - kwds_mapping[j - input_argcount] = i # map to the right index + kwds_mapping.append(name) num_remainingkwds -= 1 if num_remainingkwds: @@ -126,14 +123,11 @@ if input_argcount < co_argcount: def_first = co_argcount - (0 if defaults_w is None else len(defaults_w)) j = 0 - kwds_index = -1 for i in range(input_argcount, co_argcount): - if kwds_mapping is not None: - kwds_index = kwds_mapping[j] - j += 1 - if kwds_index >= 0: - scope_w[i] = keywords_w[kwds_index] - continue + name = signature.argnames[i] + if name in keywords: + scope_w[i] = keywords[name] + continue defnum = i - def_first if defnum >= 0: scope_w[i] = defaults_w[defnum] @@ -144,8 +138,7 @@ def unpack(self): "Return a ([w1,w2...], {'kw':w3...}) pair." 
- kwds_w = dict(zip(self.keywords, self.keywords_w)) - return self.positional_args, kwds_w + return self.positional_args, self.keywords def match_signature(self, signature, defaults_w): """Parse args and kwargs according to the signature of a code object, @@ -169,7 +162,7 @@ args_w = data_w[:cnt] + stararg_w assert len(args_w) == need_cnt assert not self.keywords - return ArgumentsForTranslation(args_w, [], []) + return ArgumentsForTranslation(args_w, {}) else: data_w = data_w[:-1] assert len(data_w) == cnt @@ -177,7 +170,7 @@ args_w = data_w[:need_cnt] _kwds_w = dict(zip(argnames[need_cnt:], data_w[need_cnt:])) keywords_w = [_kwds_w[key] for key in self.keywords] - return ArgumentsForTranslation(args_w, self.keywords, keywords_w) + return ArgumentsForTranslation(args_w, dict(zip(self.keywords, keywords_w))) @classmethod def fromshape(cls, (shape_cnt, shape_keys, shape_star, shape_stst), data_w): @@ -193,14 +186,13 @@ p += 1 else: w_starstar = None - return cls(args_w, list(shape_keys), data_w[shape_cnt:end_keys], + return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])), w_star, w_starstar) def flatten(self): """ Argument <-> list of w_objects together with "shape" information """ shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape() - data_w = self.arguments_w + [self.keywords_w[self.keywords.index(key)] - for key in shape_keys] + data_w = self.arguments_w + [self.keywords[key] for key in shape_keys] if shape_star: data_w.append(self.w_stararg) if shape_stst: diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -11,11 +11,6 @@ return list(it) -def make_arguments_for_translation(args_w, keywords_w={}, w_stararg=None, - w_starstararg=None): - return MockArgs(args_w, keywords_w.keys(), keywords_w.values(), - w_stararg, w_starstararg) - class TestArgumentsForTranslation(object): def test_prepend(self): @@ 
-24,10 +19,9 @@ assert args1 is not args assert args1.arguments_w == ["thingy", "0"] assert args1.keywords == args.keywords - assert args1.keywords_w == args.keywords_w def test_fixedunpacked(self): - args = MockArgs([], ["k"], [1]) + args = MockArgs([], {"k": 1}) py.test.raises(ValueError, args.fixedunpack, 1) args = MockArgs(["a", "b"]) @@ -39,85 +33,84 @@ assert args.fixedunpack(2) == ['a', 'b'] def test_unmatch_signature(self): - args = make_arguments_for_translation([1, 2, 3]) + args = MockArgs([1, 2, 3]) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation([1]) + args = MockArgs([1]) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, [2, 3]) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation([1, 2, 3, 4, 5]) + args = MockArgs([1, 2, 3, 4, 5]) sig = Signature(['a', 'b', 'c'], 'r', None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation([1], {'c': 3, 'b': 2}) + args = MockArgs([1], {'c': 3, 'b': 2}) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation([1], {'c': 5}) + args = MockArgs([1], {'c': 5}) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, [2, 3]) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() def test_rawshape(self): - args = make_arguments_for_translation([1, 2, 3]) + args = MockArgs([1, 2, 3]) assert rawshape(args) == (3, (), False, False) - args = make_arguments_for_translation([1]) + args = MockArgs([1]) assert rawshape(args, 2) == (3, (), False, False) - args = 
make_arguments_for_translation([1, 2, 3, 4, 5]) + args = MockArgs([1, 2, 3, 4, 5]) assert rawshape(args) == (5, (), False, False) - args = make_arguments_for_translation([1], {'c': 3, 'b': 2}) + args = MockArgs([1], {'c': 3, 'b': 2}) assert rawshape(args) == (1, ('b', 'c'), False, False) - args = make_arguments_for_translation([1], {'c': 5}) + args = MockArgs([1], {'c': 5}) assert rawshape(args) == (1, ('c', ), False, False) - args = make_arguments_for_translation([1], {'c': 5, 'd': 7}) + args = MockArgs([1], {'c': 5, 'd': 7}) assert rawshape(args) == (1, ('c', 'd'), False, False) - args = make_arguments_for_translation([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) + args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) assert rawshape(args) == (5, ('d', 'e'), False, False) def test_flatten(self): - args = make_arguments_for_translation([1, 2, 3]) + args = MockArgs([1, 2, 3]) assert args.flatten() == ((3, (), False, False), [1, 2, 3]) - args = make_arguments_for_translation([1]) + args = MockArgs([1]) assert args.flatten() == ((1, (), False, False), [1]) - args = make_arguments_for_translation([1, 2, 3, 4, 5]) + args = MockArgs([1, 2, 3, 4, 5]) assert args.flatten() == ((5, (), False, False), [1, 2, 3, 4, 5]) - args = make_arguments_for_translation([1], {'c': 3, 'b': 2}) + args = MockArgs([1], {'c': 3, 'b': 2}) assert args.flatten() == ((1, ('b', 'c'), False, False), [1, 2, 3]) - args = make_arguments_for_translation([1], {'c': 5}) + args = MockArgs([1], {'c': 5}) assert args.flatten() == ((1, ('c', ), False, False), [1, 5]) - args = make_arguments_for_translation([1], {'c': 5, 'd': 7}) + args = MockArgs([1], {'c': 5, 'd': 7}) assert args.flatten() == ((1, ('c', 'd'), False, False), [1, 5, 7]) - args = make_arguments_for_translation([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) + args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) assert args.flatten() == ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) def test_stararg_flowspace_variable(self): var = object() shape = ((2, ('g', ), 
True, False), [1, 2, 9, var]) - args = make_arguments_for_translation([1, 2], {'g': 9}, - w_stararg=var) + args = MockArgs([1, 2], {'g': 9}, w_stararg=var) assert args.flatten() == shape args = MockArgs.fromshape(*shape) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -74,10 +74,8 @@ raise TyperError("kwds args not supported") # prefix keyword arguments with 'i_' kwds_i = {} - for i, key in enumerate(keywords): - index = arguments.keywords_w[i] - kwds_i['i_' + key] = index - + for key in keywords: + kwds_i['i_' + key] = keywords[key] return hop, kwds_i From noreply at buildbot.pypy.org Mon Nov 18 22:52:24 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 18 Nov 2013 22:52:24 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: kill all (broken by design) support for **-unpacking Message-ID: <20131118215224.767A01C026D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68225:cda622311685 Date: 2013-11-18 03:49 +0000 http://bitbucket.org/pypy/pypy/changeset/cda622311685/ Log: kill all (broken by design) support for **-unpacking diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -4,11 +4,8 @@ from rpython.annotator.model import SomeTuple class ArgumentsForTranslation(object): - w_starstararg = None - def __init__(self, args_w, keywords=None, - w_stararg=None, w_starstararg=None): + def __init__(self, args_w, keywords=None, w_stararg=None): self.w_stararg = w_stararg - assert w_starstararg is None assert isinstance(args_w, list) self.arguments_w = args_w self.keywords = keywords or {} @@ -50,12 +47,11 @@ def prepend(self, w_firstarg): # used often "Return a new Arguments with a new argument inserted first." 
return ArgumentsForTranslation([w_firstarg] + self.arguments_w, - self.keywords, self.w_stararg, - self.w_starstararg) + self.keywords, self.w_stararg) def copy(self): return ArgumentsForTranslation(self.arguments_w, self.keywords, - self.w_stararg, self.w_starstararg) + self.w_stararg) def _match_signature(self, scope_w, signature, defaults_w=None): """Parse args and kwargs according to the signature of a code object, @@ -173,7 +169,7 @@ return ArgumentsForTranslation(args_w, dict(zip(self.keywords, keywords_w))) @classmethod - def fromshape(cls, (shape_cnt, shape_keys, shape_star, shape_stst), data_w): + def fromshape(cls, (shape_cnt, shape_keys, shape_star), data_w): args_w = data_w[:shape_cnt] p = end_keys = shape_cnt + len(shape_keys) if shape_star: @@ -181,30 +177,22 @@ p += 1 else: w_star = None - if shape_stst: - w_starstar = data_w[p] - p += 1 - else: - w_starstar = None return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])), - w_star, w_starstar) + w_star) def flatten(self): """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape() + shape_cnt, shape_keys, shape_star = self._rawshape() data_w = self.arguments_w + [self.keywords[key] for key in shape_keys] if shape_star: data_w.append(self.w_stararg) - if shape_stst: - data_w.append(self.w_starstararg) - return (shape_cnt, shape_keys, shape_star, shape_stst), data_w + return (shape_cnt, shape_keys, shape_star), data_w def _rawshape(self, nextra=0): shape_cnt = len(self.arguments_w) + nextra # Number of positional args shape_keys = tuple(sorted(self.keywords)) shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = self.w_starstararg is not None # Flag: presence of **kwds - return shape_cnt, shape_keys, shape_star, shape_stst + return shape_cnt, shape_keys, shape_star def rawshape(args, nextra=0): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- 
a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -307,7 +307,7 @@ r_func, nimplicitarg = s_repr.const.get_r_implfunc() nbargs = len(args_s) + nimplicitarg - s_sigs = r_func.get_s_signatures((nbargs, (), False, False)) + s_sigs = r_func.get_s_signatures((nbargs, (), False)) if len(s_sigs) != 1: raise TyperError("cannot hlinvoke callable %r with not uniform" "annotations: %r" % (s_repr.const, diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1065,8 +1065,9 @@ gf2 = graphof(a, f2) gf3 = graphof(a, f3) - assert fam1.calltables == {(2, (), False, False): [{fdesc1: gf1}], (1, (), False, False): [{fdesc1: gf1}]} - assert fam2.calltables == {(1, (), False, False): [{fdesc2: gf2, fdesc3: gf3}]} + assert fam1.calltables == {(2, (), False): [{fdesc1: gf1}], + (1, (), False): [{fdesc1: gf1}]} + assert fam2.calltables == {(1, (), False): [{fdesc2: gf2, fdesc3: gf3}]} def test_pbc_call_ins(self): class A(object): @@ -1117,14 +1118,14 @@ gfA_m = graphof(a, A.m.im_func) gfC_m = graphof(a, C.m.im_func) - assert famB_n.calltables == {(1, (), False, False): [{mdescB_n.funcdesc: gfB_n}] } - assert famA_m.calltables == {(1, (), False, False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } + assert famB_n.calltables == {(1, (), False): [{mdescB_n.funcdesc: gfB_n}] } + assert famA_m.calltables == {(1, (), False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } mdescCinit = getmdesc(C().__init__) famCinit = mdescCinit.getcallfamily() gfCinit = graphof(a, C.__init__.im_func) - assert famCinit.calltables == {(1, (), False, False): [{mdescCinit.funcdesc: gfCinit}] } + assert famCinit.calltables == {(1, (), False): [{mdescCinit.funcdesc: gfCinit}] } def test_isinstance_usigned(self): def f(x): @@ -2053,7 +2054,7 @@ someint = annmodel.SomeInteger() - assert (fdesc.get_s_signatures((2,(),False,False)) + 
assert (fdesc.get_s_signatures((2, (), False)) == [([someint,someint],someint)]) def test_emulated_pbc_call_callback(self): @@ -4080,7 +4081,7 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert ("RPython cannot unify instances with no common base class" + assert ("RPython cannot unify instances with no common base class" in exc.value.msg) def test_unionerror_iters(self): @@ -4096,7 +4097,7 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert ("RPython cannot unify incompatible iterator variants" in + assert ("RPython cannot unify incompatible iterator variants" in exc.value.msg) def test_variable_getattr(self): diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -65,51 +65,51 @@ def test_rawshape(self): args = MockArgs([1, 2, 3]) - assert rawshape(args) == (3, (), False, False) + assert rawshape(args) == (3, (), False) args = MockArgs([1]) - assert rawshape(args, 2) == (3, (), False, False) + assert rawshape(args, 2) == (3, (), False) args = MockArgs([1, 2, 3, 4, 5]) - assert rawshape(args) == (5, (), False, False) + assert rawshape(args) == (5, (), False) args = MockArgs([1], {'c': 3, 'b': 2}) - assert rawshape(args) == (1, ('b', 'c'), False, False) + assert rawshape(args) == (1, ('b', 'c'), False) args = MockArgs([1], {'c': 5}) - assert rawshape(args) == (1, ('c', ), False, False) + assert rawshape(args) == (1, ('c', ), False) args = MockArgs([1], {'c': 5, 'd': 7}) - assert rawshape(args) == (1, ('c', 'd'), False, False) + assert rawshape(args) == (1, ('c', 'd'), False) args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) - assert rawshape(args) == (5, ('d', 'e'), False, False) + assert rawshape(args) == (5, ('d', 'e'), False) def test_flatten(self): args = MockArgs([1, 2, 3]) - assert args.flatten() == ((3, (), False, False), [1, 2, 3]) + assert args.flatten() == 
((3, (), False), [1, 2, 3]) args = MockArgs([1]) - assert args.flatten() == ((1, (), False, False), [1]) + assert args.flatten() == ((1, (), False), [1]) args = MockArgs([1, 2, 3, 4, 5]) - assert args.flatten() == ((5, (), False, False), [1, 2, 3, 4, 5]) + assert args.flatten() == ((5, (), False), [1, 2, 3, 4, 5]) args = MockArgs([1], {'c': 3, 'b': 2}) - assert args.flatten() == ((1, ('b', 'c'), False, False), [1, 2, 3]) + assert args.flatten() == ((1, ('b', 'c'), False), [1, 2, 3]) args = MockArgs([1], {'c': 5}) - assert args.flatten() == ((1, ('c', ), False, False), [1, 5]) + assert args.flatten() == ((1, ('c', ), False), [1, 5]) args = MockArgs([1], {'c': 5, 'd': 7}) - assert args.flatten() == ((1, ('c', 'd'), False, False), [1, 5, 7]) + assert args.flatten() == ((1, ('c', 'd'), False), [1, 5, 7]) args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) - assert args.flatten() == ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) + assert args.flatten() == ((5, ('d', 'e'), False), [1, 2, 3, 4, 5, 7, 5]) def test_stararg_flowspace_variable(self): var = object() - shape = ((2, ('g', ), True, False), [1, 2, 9, var]) + shape = ((2, ('g', ), True), [1, 2, 9, var]) args = MockArgs([1, 2], {'g': 9}, w_stararg=var) assert args.flatten() == shape @@ -117,30 +117,30 @@ assert args.flatten() == shape def test_fromshape(self): - shape = ((3, (), False, False), [1, 2, 3]) + shape = ((3, (), False), [1, 2, 3]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, (), False, False), [1]) + shape = ((1, (), False), [1]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((5, (), False, False), [1, 2, 3, 4, 5]) + shape = ((5, (), False), [1, 2, 3, 4, 5]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('b', 'c'), False, False), [1, 2, 3]) + shape = ((1, ('b', 'c'), False), [1, 2, 3]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('c', ), False, False), [1, 5]) + 
shape = ((1, ('c', ), False), [1, 5]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('c', 'd'), False, False), [1, 5, 7]) + shape = ((1, ('c', 'd'), False), [1, 5, 7]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) + shape = ((5, ('d', 'e'), False), [1, 2, 3, 4, 5, 7, 5]) args = MockArgs.fromshape(*shape) assert args.flatten() == shape diff --git a/rpython/flowspace/argument.py b/rpython/flowspace/argument.py --- a/rpython/flowspace/argument.py +++ b/rpython/flowspace/argument.py @@ -77,10 +77,8 @@ """Represents the arguments passed into a function call, i.e. the `a, b, *c, **d` part in `return func(a, b, *c, **d)`. """ - def __init__(self, args_w, keywords=None, w_stararg=None, - w_starstararg=None): + def __init__(self, args_w, keywords=None, w_stararg=None): self.w_stararg = w_stararg - assert w_starstararg is None, "No **-unpacking in RPython" assert isinstance(args_w, list) self.arguments_w = args_w self.keywords = keywords or {} @@ -90,11 +88,10 @@ shape_cnt = len(self.arguments_w) # Number of positional args shape_keys = tuple(sorted(self.keywords)) shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = False # Flag: presence of **kwds data_w = self.arguments_w + [self.keywords[key] for key in shape_keys] if shape_star: data_w.append(self.w_stararg) - return (shape_cnt, shape_keys, shape_star, shape_stst), data_w + return (shape_cnt, shape_keys, shape_star), data_w def as_list(self): assert not self.keywords diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -926,7 +926,7 @@ key = w_key.value keywords[key] = w_value arguments = self.popvalues(n_arguments) - args = CallSpec(arguments, keywords, w_star, w_starstar) + args = CallSpec(arguments, keywords, w_star) w_function = self.popvalue() w_result = 
self.space.call(w_function, args) self.pushvalue(w_result) diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -748,8 +748,7 @@ for block in graph.iterblocks(): for op in block.operations: assert op.opname == "call_args" - assert op.args == map(Constant, - [g, (0, ('x',), False, False), 2]) + assert op.args == map(Constant, [g, (0, ('x',), False), 2]) def test_catch_importerror_1(self): def f(): diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -92,9 +92,8 @@ else: return False # nothing to do, all signatures already match - shape_cnt, shape_keys, shape_star, shape_stst = shape + shape_cnt, shape_keys, shape_star = shape assert not shape_star, "XXX not implemented" - assert not shape_stst, "XXX not implemented" # for the first 'shape_cnt' arguments we need to generalize to # a common type diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -49,8 +49,6 @@ arguments = ArgumentsForTranslation.fromshape( hop.args_s[1].const, # shape range(hop.nb_args-2)) - if arguments.w_starstararg is not None: - raise TyperError("**kwds call not implemented") if arguments.w_stararg is not None: # expand the *arg in-place -- it must be a tuple from rpython.rtyper.rtuple import TupleRepr @@ -303,7 +301,7 @@ s_callable = r_callable.get_s_callable() nbargs = len(hop.args_s) - 1 + nimplicitarg - s_sigs = r_func.get_s_signatures((nbargs, (), False, False)) + s_sigs = r_func.get_s_signatures((nbargs, (), False)) if len(s_sigs) != 1: raise TyperError("cannot hlinvoke callable %r with not uniform" "annotations: %r" % (r_callable, diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -268,8 +268,8 
@@ # lowleveltype wouldn't be Void otherwise funcdesc, = self.s_pbc.descriptions tables = [] # find the simple call in the calltable - for key, table in self.callfamily.calltables.items(): - if not key[1] and not key[2] and not key[3]: + for shape, table in self.callfamily.calltables.items(): + if not shape[1] and not shape[2]: tables.append(table) if len(tables) != 1: raise TyperError("cannot pass a function with various call shapes") diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -185,7 +185,7 @@ .+Sub1.fn .+Sub2.fn are called with inconsistent numbers of arguments -sometimes with 2 arguments, sometimes with 1 +sometimes with \d arguments, sometimes with \d the callers of these functions are: .+otherfunc .+dummyfn""" @@ -224,7 +224,7 @@ from rpython.rtyper import annlowlevel # annotate, normalize and rtype fn after the fact - annhelper = annlowlevel.MixLevelHelperAnnotator(typer) + annhelper = annlowlevel.MixLevelHelperAnnotator(typer) graph = annhelper.getgraph(fn, [a.typeannotation(argtype) for argtype in argtypes], s_result) annhelper.finish() @@ -239,7 +239,7 @@ assert res == 100 res = llinterp.eval_graph(graphof(t, prefn), [2]) assert res == 201 - + return t def test_mix_after_recursion(self): @@ -248,7 +248,7 @@ return 2*prefn(n-1) else: return 1 - + t = TranslationContext() a = t.buildannotator() a.build_types(prefn, [int]) @@ -260,10 +260,10 @@ return 1 from rpython.rtyper import annlowlevel - annhelper = annlowlevel.MixLevelHelperAnnotator(typer) + annhelper = annlowlevel.MixLevelHelperAnnotator(typer) graph = annhelper.getgraph(f, [], annmodel.SomeInteger()) annhelper.finish() - + def test_add_more_subclasses(self): from rpython.rtyper import rclass from rpython.rtyper.lltypesystem.rclass import ll_issubclass From noreply at buildbot.pypy.org Mon Nov 18 23:01:06 2013 From: noreply at buildbot.pypy.org 
(bivab) Date: Mon, 18 Nov 2013 23:01:06 +0100 (CET) Subject: [pypy-commit] buildbot add-header-to-nightly: close branch Message-ID: <20131118220106.9C1FE1C026D@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: add-header-to-nightly Changeset: r897:76938234d843 Date: 2013-11-18 23:00 +0100 http://bitbucket.org/pypy/buildbot/changeset/76938234d843/ Log: close branch From noreply at buildbot.pypy.org Mon Nov 18 23:01:07 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 18 Nov 2013 23:01:07 +0100 (CET) Subject: [pypy-commit] buildbot default: merge add-header-to-nightly Message-ID: <20131118220107.E0D511C02D8@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r898:3f8d7754ec20 Date: 2013-11-18 23:00 +0100 http://bitbucket.org/pypy/buildbot/changeset/3f8d7754ec20/ Log: merge add-header-to-nightly diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -5,7 +5,8 @@ import cgi import urllib import sys -from twisted.web.static import File, DirectoryLister +from twisted.web.static import File, formatFileSize +from buildbot.status.web.base import DirectoryLister class PyPyTarball(object): @@ -142,98 +143,39 @@ names = File.listNames(self) if is_pypy_dir(names): names = self.sortBuildNames(names) - Listener = PyPyDirectoryLister else: names = self.sortDirectoryNames(File.listEntities(self)) - Listener = DirectoryLister + Listener = PyPyDirectoryLister return Listener(self.path, names, self.contentTypes, self.contentEncodings, self.defaultType) -class NumpyStatusList(File): - pass - class PyPyDirectoryLister(DirectoryLister): - template = """ - -%(header)s - - - - -

%(header)s

- - - - - - - - - - - - -%(tableContent)s - -
FilenameSizeDateown testsapplevel tests
- - - -""" - - linePattern = """ - %(text)s - %(size)s - %(date)s - %(own_summary)s - %(app_summary)s - -""" + '''template based, uses master/templates/directory.html + ''' def render(self, request): self.status = request.site.buildbot_service.getStatus() return DirectoryLister.render(self, request) - def _buildTableContent(self, elements): - tableContent = [] + def _getFilesAndDirectories(self, directory): + dirs, files = DirectoryLister._getFilesAndDirectories(self, directory) rowClasses = itertools.cycle(['odd', 'even']) - for element, rowClass in zip(elements, rowClasses): - element["class"] = rowClass - self._add_test_results(element, rowClass) - tableContent.append(self.linePattern % element) - return tableContent + for f, rowClass in zip(files, rowClasses): + f["class"] = rowClass + self._add_test_results(f, rowClass) + for d in dirs: + dirname = urllib.unquote(d['href']) + dd = py.path.local(self.path).join(dirname) + date = datetime.date.fromtimestamp(dd.mtime()) + d['date'] = date.isoformat() + # Assume dir is non-recursive + size = sum([f.size() for f in dd.listdir() if f.isfile()]) + d['size'] = formatFileSize(size) + + return dirs, files def _add_test_results(self, element, rowClass): filename = urllib.unquote(element['href']) @@ -292,3 +234,6 @@ else: return rowClass + '-failed' +class NumpyStatusList(PyPyList): + pass + diff --git a/master/templates/directory.html b/master/templates/directory.html new file mode 100644 --- /dev/null +++ b/master/templates/directory.html @@ -0,0 +1,94 @@ +{% extends "layout.html" %} +{% block morehead %} + + +{% endblock %} + +{% block content %} + +

Directory listing for {{ path }}

+ +{% set row_class = cycler('odd', 'even') %} + +{% set has_tests = files|join('', attribute='own_summary')|length > 0 or + files|join('', attribute='app_summary')|length > 0 %} + + + +{% if files|length > 0 %} + + + + +{% if has_tests %} + + +{% endif %} + +{% else %} + + + + +{% if has_tests %} + + +{% endif %} + +{% endif %} + +{% for d in directories %} + + + + +{% if has_tests %} + + +{% endif %} + +{% endfor %} + +{% for f in files %} + + + + +{% if has_tests %} + + +{% endif %} + +{% endfor %} +
FilenameSizeDateown testsapplevel tests
DirectorySizeDate
{{ d.text }}{{ d.size}}{{ d.date}}
{{ f.text }}{{ f.size }}{{ f.date }}{{ f.own_summary }}{{ f.app_summary }}
+ +{% endblock %} diff --git a/master/templates/layout.html b/master/templates/layout.html --- a/master/templates/layout.html +++ b/master/templates/layout.html @@ -23,19 +23,19 @@ {% block header -%}
Home - - - Speed - Summary (trunk) - Summary - Nightly builds + - Speed + - Numpy compatability + - Summary (trunk) + - Summary + - Nightly builds - Waterfall + - Waterfall - Builders + - Builders diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ buildbot-slave==0.8.6p1 decorator==3.4.0 mock==1.0.1 -py==1.4.9 +py==1.4.18 pytest==2.2.4 python-dateutil==1.5 sqlalchemy-migrate==0.7.2 From noreply at buildbot.pypy.org Tue Nov 19 00:02:06 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 19 Nov 2013 00:02:06 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131118230206.146F21C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68226:526680d19846 Date: 2013-11-18 15:01 -0800 http://bitbucket.org/pypy/pypy/changeset/526680d19846/ Log: merge default diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + 
self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 
'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -66,9 +66,9 @@ ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; - or by directly doing - ``git clone https://bitbucket.org/pypy/numpy.git``, - ``cd numpy``, ``python setup.py install``. + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. * non-inlined calls have less overhead diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,3 +9,7 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality + +.. branch windows-packaging +Package tk/tcl runtime with win32 + diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -73,11 +73,11 @@ https://bitbucket.org/pypy/pypy/downloads/local.zip Then expand it into the base directory (base_dir) and modify your environment to reflect this:: - set PATH=\bin;%PATH% - set INCLUDE=\include;%INCLUDE% - set LIB=\lib;%LIB% + set PATH=\bin;\tcltk\bin;%PATH% + set INCLUDE=\include;\tcltk\include;%INCLUDE% + set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. Read on for more information. The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -109,11 +109,10 @@ The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://bzip.org/1.0.5/bzip2-1.0.5.tar.gz and extract it in -the base directory. 
Then compile:: - - cd bzip2-1.0.5 + svn export http://svn.python.org/projects/external/bzip2-1.0.6 + cd bzip2-1.0.6 nmake -f makefile.msc + copy bzip.dll \bzip.dll The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -122,8 +121,6 @@ wrapper is compiled when the module is imported for the first time. The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. - - The expat XML parser ~~~~~~~~~~~~~~~~~~~~ @@ -146,13 +143,33 @@ use the one distributed by ActiveState, or the one from cygwin. In both case the perl interpreter must be found on the PATH. -Get http://www.openssl.org/source/openssl-0.9.8k.tar.gz and extract it -in the base directory. Then compile:: - + svn export http://svn.python.org/projects/external/openssl-0.9.8y + cd openssl-0.9.8y perl Configure VC-WIN32 ms\do_ms.bat nmake -f ms\nt.mak install +TkInter module support +~~~~~~~~~~~~~~~~~~~~~~ + +Note that much of this is taken from the cpython build process. +Tkinter is imported via cffi, so the module is optional. To recreate the tcltk +directory found for the release script, create the dlls, libs, headers and +runtime by running:: + + svn export http://svn.python.org/projects/external/tcl-8.5.2.1 tcl85 + svn export http://svn.python.org/projects/external/tk-8.5.2.0 tk85 + cd tcl85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=0 INSTALLDIR=..\..\tcltk clean all + nmake -f makefile.vc DEBUG=0 INSTALLDIR=..\..\tcltk install + cd ..\..\tk85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 clean all + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 install + +Now you should have a tcktk\bin, tcltk\lib, and tcltk\include directory ready +for use. The release packaging script will pick up the tcltk runtime in the lib +directory and put it in the archive. 
+ Using the mingw compiler ------------------------ diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -1061,7 +1061,8 @@ self.w_tmp_dir = self.space.wrap(tmp_dir) - foo_py = prefix.join('foo.py').write("pass") + foo_py = prefix.join('foo.py') + foo_py.write("pass") self.w_foo_py = self.space.wrap(str(foo_py)) def test_setup_bootstrap_path(self): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1086,7 +1086,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1094,7 +1096,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1104,7 +1108,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 
+ if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/binascii/interp_crc32.py b/pypy/module/binascii/interp_crc32.py --- a/pypy/module/binascii/interp_crc32.py +++ b/pypy/module/binascii/interp_crc32.py @@ -1,17 +1,9 @@ from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import r_uint, intmask -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rzipfile import crc_32_tab +from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rzipfile @unwrap_spec(data='bufferstr', oldcrc='truncatedint_w') def crc32(space, data, oldcrc=0): "Compute the CRC-32 incrementally." - - crc = r_uint(rffi.cast(rffi.UINT, ~oldcrc)) # signed => 32-bit unsigned - - # in the following loop, we have always 0 <= crc < 2**32 - for c in data: - crc = crc_32_tab[(crc & 0xff) ^ ord(c)] ^ (crc >> 8) - - return space.wrap(rffi.cast(rffi.UINT, ~crc)) - + crc = rzipfile.crc32(data, r_uint(oldcrc)) + return space.wrap(crc) diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -73,7 +73,7 @@ # u = interp_marshal.StringUnmarshaller(space, space.wrapbytes(expected)) w_long = u.load_w_obj() - assert space.eq_w(w_long, w_obj) is True + assert space.eq_w(w_long, w_obj) for sign in [1L, -1L]: for i in range(100): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -80,7 +80,7 @@ return scalar.Scalar(self.dtype, self.getitem(0)) return None - def get_view(self, orig_array, dtype, new_shape): + def get_view(self, space, orig_array, dtype, new_shape): strides, backstrides = 
support.calc_strides(new_shape, dtype, self.order) return SliceArray(self.start, strides, backstrides, new_shape, @@ -211,7 +211,15 @@ "field named %s not found" % idx)) return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): + space.isinstance_w(w_idx, space.w_slice)): + return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) + elif isinstance(w_idx, W_NDimArray) and \ + isinstance(w_idx.implementation, scalar.Scalar): + w_idx = w_idx.get_scalar_value().item(space) + if not space.isinstance_w(w_idx, space.w_int) and \ + not space.isinstance_w(w_idx, space.w_bool): + raise OperationError(space.w_IndexError, space.wrap( + "arrays used as indices must be of integer (or boolean) type")) return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -68,9 +68,15 @@ def transpose(self, _): return self - def get_view(self, orig_array, dtype, new_shape): + def get_view(self, space, orig_array, dtype, new_shape): scalar = Scalar(dtype) - scalar.value = self.value.convert_to(dtype) + if dtype.is_str_or_unicode(): + scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) + elif dtype.is_record_type(): + raise OperationError(space.w_NotImplementedError, space.wrap( + "viewing scalar as record not implemented")) + else: + scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) return scalar def get_real(self, orig_array): @@ -127,19 +133,20 @@ if space.len_w(w_idx) == 0: return self.get_scalar_value() raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def getitem_index(self, space, idx): raise 
OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def setitem_index(self, space, idx, w_val): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) + def set_shape(self, space, orig_array, new_shape): if not new_shape: return self diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -106,16 +106,26 @@ args_w = [convert_to_array(space, w_arg) for w_arg in args_w] dtype = args_w[0].get_dtype() shape = args_w[0].get_shape()[:] - _axis = axis + ndim = len(shape) + orig_axis = axis if axis < 0: - _axis = len(shape) + axis + axis = ndim + axis + if ndim == 1 and axis != 0: + axis = 0 + if axis < 0 or axis >= ndim: + raise operationerrfmt(space.w_IndexError, + "axis %d out of bounds [0, %d)", orig_axis, ndim) for arr in args_w[1:]: + if len(arr.get_shape()) != ndim: + raise OperationError(space.w_ValueError, space.wrap( + "all the input arrays must have same number of dimensions")) for i, axis_size in enumerate(arr.get_shape()): - if len(arr.get_shape()) != len(shape) or (i != _axis and axis_size != shape[i]): + if i == axis: + shape[i] += axis_size + elif axis_size != shape[i]: raise OperationError(space.w_ValueError, space.wrap( - "all the input arrays must have same number of dimensions")) - elif i == _axis: - shape[i] += axis_size + "all the input array dimensions except for the " + "concatenation axis must match exactly")) a_dt = arr.get_dtype() if dtype.is_record_type() and a_dt.is_record_type(): # Record types must match @@ -129,19 +139,17 @@ space.wrap("invalid type promotion")) dtype = 
interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) - if _axis < 0 or len(arr.get_shape()) <= _axis: - raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) # concatenate does not handle ndarray subtypes, it always returns a ndarray res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: - if arr.get_shape()[_axis] == 0: + if arr.get_shape()[axis] == 0: continue - chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, - arr.get_shape()[_axis]) + chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis], 1, + arr.get_shape()[axis]) Chunks(chunks).apply(space, res).implementation.setslice(space, arr) - axis_start += arr.get_shape()[_axis] + axis_start += arr.get_shape()[axis] return res @unwrap_spec(repeats=int) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -267,11 +267,19 @@ from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) if dtype.get_size() != self.get_dtype(space).get_size(): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) - raise OperationError(space.w_NotImplementedError, space.wrap( - "view not implelemnted yet")) + if dtype.is_str_or_unicode(): + return dtype.coerce(space, space.wrap(self.raw_str())) + elif dtype.is_record_type(): + raise OperationError(space.w_NotImplementedError, space.wrap( + "viewing scalar as record not implemented")) + else: + return dtype.itemtype.runpack_str(space, self.raw_str()) def descr_self(self, space): return self @@ -408,6 +416,9 @@ def get_dtype(self, space): return 
self.arr.dtype + def raw_str(self): + return self.arr.dtype.itemtype.to_str(self) + class W_VoidBox(W_FlexibleBox): def descr_getitem(self, space, w_item): if (space.isinstance_w(w_item, space.w_unicode) or diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -855,24 +855,21 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itemsize = dtype.get_size() + itembits = dtype.get_size() * 8 items_w = [space.wrap(dtype.char), space.wrap(dtype.num), - space.wrap(itemsize * 8), # in case of changing - # number of bits per byte in the future - space.wrap(itemsize / - (2 if dtype.kind == NPY_COMPLEXLTR else 1) - or 1)] + space.wrap(itembits), + space.wrap(dtype.itemtype.get_element_size())] if dtype.is_int_type(): if dtype.kind == NPY_GENBOOLLTR: w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): - w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + w_maxobj = space.wrap(r_longlong((1 << (itembits - 1)) - 1)) - w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itembits - 1)) else: - w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_maxobj = space.wrap(r_ulonglong(1 << itembits) - 1) w_minobj = space.wrap(0) items_w = items_w + [w_maxobj, w_minobj] items_w = items_w + [dtype.w_box_type] diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -198,7 +198,8 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: return 
self.getitem_filter(space, w_idx) try: return self.implementation.descr_getitem(space, self, w_idx) @@ -212,7 +213,8 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: @@ -704,7 +706,7 @@ return self return wrap_impl(space, space.type(self), self, self.implementation.get_view( - self, self.get_dtype(), new_shape)) + space, self, self.get_dtype(), new_shape)) def descr_strides(self, space): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -733,11 +735,14 @@ impl = self.implementation new_shape = self.get_shape()[:] dims = len(new_shape) + if new_itemsize == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) if dims == 0: # Cannot resize scalars if old_itemsize != new_itemsize: raise OperationError(space.w_ValueError, space.wrap( - "new type not compatible with array shape")) + "new type not compatible with array.")) else: if dims == 1 or impl.get_strides()[0] < impl.get_strides()[-1]: # Column-major, resize first dimension @@ -751,7 +756,7 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - v = impl.get_view(self, dtype, new_shape) + v = impl.get_view(space, self, dtype, new_shape) w_ret = wrap_impl(space, w_type, self, v) return w_ret diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -490,7 +490,7 @@ if dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: - val = dtype.itemtype.runpack_str(sub) + val = dtype.itemtype.runpack_str(space, sub) ai.setitem(val) 
ai.next() i += 1 diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -26,7 +26,7 @@ del types types = ['Generic', 'Number', 'Integer', 'SignedInteger', 'UnsignedInteger', - 'Inexact', 'Floating', 'ComplexFloating', 'Character'] + 'Inexact', 'Floating', 'ComplexFloating', 'Flexible', 'Character'] for t in types: globals()[t.lower()] = typeinfo[t] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -18,6 +18,22 @@ class AppTestDtypes(BaseAppTestDtypes): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def test_typeinfo(self): + import numpy as np + try: + from numpy.core.multiarray import typeinfo + except ImportError: + # running on dummy module + from numpypy import typeinfo + assert typeinfo['Number'] == np.number + assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, + -9223372036854775808L, np.longlong) + assert typeinfo['VOID'] == ('V', 20, 0, 1, np.void) + assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, np.bool_) + assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) + assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) + assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) + def test_dtype_basic(self): from numpypy import dtype @@ -816,12 +832,19 @@ assert (x == array(42)).all() class AppTestStrUnicodeDtypes(BaseNumpyAppTest): - def test_str_unicode(self): - skip('numpypy differs from numpy') + def test_mro(self): from numpypy import str_, unicode_, character, flexible, generic - - assert str_.mro() == [str_, str, character, flexible, generic, object] - assert unicode_.mro() == [unicode_, str, character, flexible, generic, object] + import sys + if '__pypy__' in sys.builtin_module_names: + assert 
str_.mro() == [str_, character, flexible, generic, + str, object] + assert unicode_.mro() == [unicode_, character, flexible, generic, + unicode, object] + else: + assert str_.mro() == [str_, str, character, flexible, + generic, object] + assert unicode_.mro() == [unicode_, unicode, character, + flexible, generic, object] def test_str_dtype(self): from numpypy import dtype, str_ @@ -862,12 +885,12 @@ def test_unicode_boxes(self): from numpypy import unicode_ - try: + import sys + if '__pypy__' in sys.builtin_module_names: + exc = raises(NotImplementedError, unicode_, 3) + assert exc.value.message.find('not supported yet') >= 0 + else: u = unicode_(3) - except NotImplementedError as e: - if e.message.find('not supported yet') >= 0: - skip('unicode box not implemented') - else: assert isinstance(u, str) def test_character_dtype(self): @@ -998,35 +1021,15 @@ assert a[0] == 1 assert (a + a)[1] == 4 -class AppTestPyPyOnly(BaseNumpyAppTest): - def setup_class(cls): - if option.runappdirect and '__pypy__' not in sys.builtin_module_names: - py.test.skip("pypy only test") - BaseNumpyAppTest.setup_class.im_func(cls) - - def test_typeinfo(self): - from numpypy import void, number, int64, bool_, complex64, complex128, float16 - try: - from numpy.core.multiarray import typeinfo - except ImportError: - # running on dummy module - from numpypy import typeinfo - assert typeinfo['Number'] == number - assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807, -9223372036854775808, int64) - assert typeinfo['VOID'] == ('V', 20, 0, 1, void) - assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, bool_) - assert typeinfo['CFLOAT'] == ('F', 14, 64, 4, complex64) - assert typeinfo['CDOUBLE'] == ('D', 15, 128, 8, complex128) - assert typeinfo['HALF'] == ('e', 23, 16, 2, float16) - class AppTestObjectDtypes(BaseNumpyAppTest): def test_scalar_from_object(self): from numpypy import array + import sys class Polynomial(object): pass - try: + if '__pypy__' in sys.builtin_module_names: + exc = 
raises(NotImplementedError, array, Polynomial()) + assert exc.value.message.find('unable to create dtype from objects') >= 0 + else: a = array(Polynomial()) assert a.shape == () - except NotImplementedError, e: - if e.message.find('unable to create dtype from objects')>=0: - skip('creating ojbect dtype not supported yet') diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -661,15 +661,17 @@ assert (b[newaxis] == [[2, 3, 4]]).all() def test_scalar(self): - from numpypy import array, dtype, int64 + from numpypy import array, dtype, int_ a = array(3) - raises(IndexError, "a[0]") - raises(IndexError, "a[0] = 5") + exc = raises(IndexError, "a[0]") + assert exc.value[0] == "0-d arrays can't be indexed" + exc = raises(IndexError, "a[0] = 5") + assert exc.value[0] == "0-d arrays can't be indexed" assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) b = a[()] - assert type(b) is int64 + assert type(b) is int_ assert b == 3 def test_len(self): @@ -1520,7 +1522,36 @@ assert arange(4, dtype='>c8').real.max() == 3.0 assert arange(4, dtype=' #define alloca _alloca #else - #include + # ifdef __FreeBSD__ + # include + # else + # include + # endif #endif static int (*python_callback)(int how_many, int *values); static int c_callback(int how_many, ...) 
{ diff --git a/pypy/module/test_lib_pypy/pyrepl/__init__.py b/pypy/module/test_lib_pypy/pyrepl/__init__.py --- a/pypy/module/test_lib_pypy/pyrepl/__init__.py +++ b/pypy/module/test_lib_pypy/pyrepl/__init__.py @@ -1,3 +1,6 @@ import sys import lib_pypy.pyrepl sys.modules['pyrepl'] = sys.modules['lib_pypy.pyrepl'] + +if sys.platform.startswith('freebsd'): + raise Exception('XXX seems to hangs on FreeBSD9') diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -17,7 +17,7 @@ except KeyError: continue assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] + assert 'root' in g.gr_mem or g.gr_mem == [] assert g.gr_name == name assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) break diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,18 +67,22 @@ raise PyPyCNotFound( 'Bogus path: %r does not exist (see docstring for more info)' % (os.path.dirname(str(pypy_c)),)) + win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', + 'libeay32.dll', 'ssleay32.dll'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - if not withouttk: - try: - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) - except subprocess.CalledProcessError: - print >>sys.stderr, """Building Tk bindings failed. + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. 
You can either install Tk development headers package or add --without-tk option to skip packaging binary CFFI extension.""" - sys.exit(1) + sys.exit(1) + #Can the dependencies be found from cffi somehow? + win_extras += ['tcl85.dll', 'tk85.dll'] if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -101,9 +105,7 @@ # Can't rename a DLL: it is always called 'libpypy-c.dll' - for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll']: + for extra in win_extras: p = pypy_c.dirpath().join(extra) if not p.check(): p = py.path.local.sysfind(extra) @@ -122,6 +124,19 @@ # XXX users will complain that they cannot compile cpyext # modules for windows, has the lib moved or are there no # exported functions in the dll so no import library is created? + if not withouttk: + try: + p = pypy_c.dirpath().join('tcl85.dll') + if not p.check(): + p = py.path.local.sysfind('tcl85.dll') + tktcldir = p.dirpath().join('..').join('lib') + shutil.copytree(str(tktcldir), str(pypydir.join('tcl'))) + except WindowsError: + print >>sys.stderr, """Packaging Tk runtime failed. +tk85.dll and tcl85.dll found, expecting to find runtime in ..\\lib +directory next to the dlls, as per build instructions.""" + import traceback;traceback.print_exc() + sys.exit(1) # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. 
@@ -217,5 +232,11 @@ else: print_usage() + if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3463,7 +3463,7 @@ py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) - + def test_setslice(self): def f(): @@ -4140,6 +4140,14 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_str_format_error(self): + def f(s, x): + return s.format(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str, str]) + assert ("format() is not RPython" in exc.value.msg) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -523,6 +523,9 @@ return SomeObject.op_contains(str, s_element) op_contains.can_only_throw = [] + def method_format(self, *args): + raise AnnotatorError("Method format() is not RPython") + class __extend__(SomeByteArray): def getslice(ba, s_start, s_stop): diff --git a/rpython/conftest.py b/rpython/conftest.py --- a/rpython/conftest.py +++ b/rpython/conftest.py @@ -24,20 +24,20 @@ def pytest_configure(config): global option option = config.option - -def _set_platform(opt, opt_str, value, parser): from rpython.config.translationoption import PLATFORMS from rpython.translator.platform import set_platform - if value not in PLATFORMS: - raise ValueError("%s not in %s" % (value, PLATFORMS)) - set_platform(value, None) + platform = config.option.platform + if platform not in PLATFORMS: + raise ValueError("%s not 
in %s" % (platform, PLATFORMS)) + set_platform(platform, None) + def pytest_addoption(parser): group = parser.getgroup("rpython options") group.addoption('--view', action="store_true", dest="view", default=False, help="view translation tests' flow graphs with Pygame") - group.addoption('-P', '--platform', action="callback", type="string", - default="host", callback=_set_platform, + group.addoption('-P', '--platform', action="store", dest="platform", + type="string", default="host", help="set up tests to use specified platform as compile/run target") group = parser.getgroup("JIT options") group.addoption('--viewloops', action="store_true", @@ -60,7 +60,7 @@ class LeakFinder: """Track memory allocations during test execution. - + So far, only used by the function lltype.malloc(flavor='raw'). """ def pytest_runtest_setup(self, __multicall__, item): diff --git a/rpython/jit/backend/x86/support.py b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -5,12 +5,13 @@ if WORD == 4: extra = ['-DPYPY_X86_CHECK_SSE2'] + if sys.platform != 'win32': + extra += ['-msse2', '-mfpmath=sse'] else: - extra = [] + extra = [] # the -m options above are always on by default on x86-64 if sys.platform != 'win32': - extra = ['-msse2', '-mfpmath=sse', - '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra + extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, diff --git a/rpython/jit/backend/x86/test/test_ztranslation_basic.py b/rpython/jit/backend/x86/test/test_ztranslation_basic.py --- a/rpython/jit/backend/x86/test/test_ztranslation_basic.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_basic.py @@ -1,11 +1,11 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest -from rpython.translator.translator import TranslationContext -from rpython.config.translationoption import DEFL_GC +from 
rpython.jit.backend.x86.arch import WORD class TestTranslationX86(TranslationTest): def _check_cbuilder(self, cbuilder): # We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. - assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra + if WORD == 4: + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,6 +4,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -79,6 +80,38 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY + _t_opened = {} + + def t_dlopen(name): + # for direct execution: can't use the regular way on FreeBSD :-( + # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html + import ctypes + if name: + name = rffi.charp2str(name) + else: + name = None + try: + res = ctypes.cdll.LoadLibrary(name) + except OSError, e: + raise DLOpenError(str(e)) + h = rffi.cast(rffi.VOIDP, res._handle) + _t_opened[rffi.cast(rffi.LONG, h)] = res + return h + + def t_dlclose(handle): + _t_opened.pop(rffi.cast(rffi.LONG, handle)) + return rffi.cast(rffi.INT, 0) + + def t_dldym(handle, name): + import ctypes + lib = _t_opened[rffi.cast(rffi.LONG, handle)] + try: + symbol = lib[name] + except AttributeError: + raise KeyError(name) + res = ctypes.cast(symbol, ctypes.c_void_p) + return rffi.cast(rffi.VOIDP, res.value or 0) + def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -91,6 +124,8 @@ def dlopen(name, mode=-1): 
""" Wrapper around C-level dlopen """ + if not we_are_translated(): + return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -104,11 +139,16 @@ raise DLOpenError(err) return res - dlclose = c_dlclose + def dlclose(handle): + if not we_are_translated(): + return t_dlclose(handle) + return c_dlclose(handle) def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ + if not we_are_translated(): + return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/rzipfile.py b/rpython/rlib/rzipfile.py --- a/rpython/rlib/rzipfile.py +++ b/rpython/rlib/rzipfile.py @@ -67,20 +67,12 @@ ] crc_32_tab = map(r_uint, crc_32_tab) -# XXX hack to get crc32 to work - -rcrc_32_tab = [r_uint(i) for i in crc_32_tab] - -def crc32(s, crc=0): - result = 0 - crc = ~r_uint(crc) & r_uint(0xffffffffL) +def crc32(s, crc=r_uint(0)): + crc = ~crc & r_uint(0xffffffffL) for c in s: - crc = rcrc_32_tab[(crc ^ r_uint(ord(c))) & 0xffL] ^ (crc >> 8) + crc = crc_32_tab[(crc ^ r_uint(ord(c))) & 0xffL] ^ (crc >> 8) #/* Note: (crc >> 8) MUST zero fill on left - - result = crc ^ r_uint(0xffffffffL) - - return result + return crc ^ r_uint(0xffffffffL) # parts copied from zipfile library implementation diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,3 +21,4 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) + dlclose(lib) From noreply at buildbot.pypy.org Tue Nov 19 01:48:03 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 19 Nov 2013 01:48:03 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: cleanup Message-ID: <20131119004803.A5F111C026D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: voidtype_strformat Changeset: r68227:03bd65055e04 Date: 2013-11-18 19:32 -0500 http://bitbucket.org/pypy/pypy/changeset/03bd65055e04/ Log: 
cleanup diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3107,8 +3107,8 @@ [[7, 8, 9], [10, 11, 12]]])], dtype=dt) s = str(a) - assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])]") - + assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " \ + "[[7, 8, 9], [10, 11, 12]]])]") def test_issue_1589(self): import numpypy as numpy From noreply at buildbot.pypy.org Tue Nov 19 01:48:04 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 19 Nov 2013 01:48:04 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20131119004804.DEEA71C026D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68228:911cd20bb189 Date: 2013-11-18 19:35 -0500 http://bitbucket.org/pypy/pypy/changeset/911cd20bb189/ Log: cleanup diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -68,7 +68,8 @@ assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] @@ -2052,7 +2053,8 @@ a = array([1, 2], dtype="int64") data = a.__reduce__() - assert data[2][4] == '\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00' + assert data[2][4] == '\x01\x00\x00\x00\x00\x00\x00\x00' \ + '\x02\x00\x00\x00\x00\x00\x00\x00' pickled_data = dumps(a) assert (loads(pickled_data) == a).all() @@ -2800,9 +2802,11 @@ assert k[0] == dtype('float16').type(5.) 
dt = array([5],dtype='longfloat').dtype if dt.itemsize == 12: - m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') + m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', + dtype='float96') elif dt.itemsize == 16: - m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00\x00\x00\x00\x00', dtype='float128') + m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00' \ + '\x00\x00\x00\x00', dtype='float128') elif dt.itemsize == 8: skip('longfloat is float64') else: @@ -3027,7 +3031,8 @@ from numpypy import dtype, array, zeros d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() @@ -3037,7 +3042,8 @@ assert (a[1][v] == [5.5, 6.5, 7.5, 8.5, 9.5]).all() for v in [-3, 2]: exc = raises(IndexError, "a[0][%d]" % v) - assert exc.value.message == "invalid index (%d)" % (v + 2 if v < 0 else v) + assert exc.value.message == "invalid index (%d)" % \ + (v + 2 if v < 0 else v) exc = raises(IndexError, "a[0]['z']") assert exc.value.message == "invalid index" exc = raises(IndexError, "a[0][None]") @@ -3107,7 +3113,8 @@ from numpypy import dtype, array d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 From noreply at buildbot.pypy.org Tue Nov 19 01:48:06 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 19 Nov 2013 01:48:06 +0100 (CET) Subject: [pypy-commit] pypy default: support order argument for array.tostring Message-ID: <20131119004806.0F0E91C026D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: 
r68229:5898d0122baa Date: 2013-11-18 19:47 -0500 http://bitbucket.org/pypy/pypy/changeset/5898d0122baa/ Log: support order argument for array.tostring diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -93,7 +93,11 @@ def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) - def descr_tostring(self, space): + def descr_tostring(self, space, w_order=None): + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) return space.wrap(loop.tostring(space, self)) def getitem_filter(self, space, arr): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2829,6 +2829,15 @@ assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' + a = array([[1, 2], [3, 4]], dtype='i1') + for order in (None, False, 'C', 'K', 'a'): + assert a.tostring(order) == '\x01\x02\x03\x04' + import sys + for order in (True, 'F'): + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.tostring, order) + else: + assert a.tostring(order) == '\x01\x03\x02\x04' class AppTestRepr(BaseNumpyAppTest): From noreply at buildbot.pypy.org Tue Nov 19 02:11:29 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 19 Nov 2013 02:11:29 +0100 (CET) Subject: [pypy-commit] pypy default: provide dtype.descr for simple dtypes Message-ID: <20131119011129.949E31C00EC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68230:38abc15e315d Date: 2013-11-18 20:10 -0500 http://bitbucket.org/pypy/pypy/changeset/38abc15e315d/ Log: provide dtype.descr for simple 
dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -151,6 +151,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -447,6 +455,7 @@ fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -832,6 +832,17 @@ assert x.dtype == int8 assert (x == array(42)).all() + def test_descr(self): + import numpy as np + assert np.dtype(' Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68231:5f4e3a547a2c Date: 2013-11-18 22:07 +0000 http://bitbucket.org/pypy/pypy/changeset/5f4e3a547a2c/ Log: make ArgumentsForTranslation a subclass of CallSpec diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -2,22 +2,9 @@ Arguments objects. 
""" from rpython.annotator.model import SomeTuple +from rpython.flowspace.argument import CallSpec -class ArgumentsForTranslation(object): - def __init__(self, args_w, keywords=None, w_stararg=None): - self.w_stararg = w_stararg - assert isinstance(args_w, list) - self.arguments_w = args_w - self.keywords = keywords or {} - - def __repr__(self): - """ NOT_RPYTHON """ - name = self.__class__.__name__ - if not self.keywords: - return '%s(%s)' % (name, self.arguments_w,) - else: - return '%s(%s, %s)' % (name, self.arguments_w, self.keywords) - +class ArgumentsForTranslation(CallSpec): @property def positional_args(self): if self.w_stararg is not None: diff --git a/rpython/flowspace/argument.py b/rpython/flowspace/argument.py --- a/rpython/flowspace/argument.py +++ b/rpython/flowspace/argument.py @@ -83,6 +83,14 @@ self.arguments_w = args_w self.keywords = keywords or {} + def __repr__(self): + """ NOT_RPYTHON """ + name = self.__class__.__name__ + if not self.keywords: + return '%s(%s)' % (name, self.arguments_w,) + else: + return '%s(%s, %s)' % (name, self.arguments_w, self.keywords) + def flatten(self): """ Argument <-> list of w_objects together with "shape" information """ shape_cnt = len(self.arguments_w) # Number of positional args From noreply at buildbot.pypy.org Tue Nov 19 02:43:58 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 19 Nov 2013 02:43:58 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: hg merge default Message-ID: <20131119014358.8B0981C026D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68232:3aeee725a045 Date: 2013-11-19 01:41 +0000 http://bitbucket.org/pypy/pypy/changeset/3aeee725a045/ Log: hg merge default diff too long, truncating to 2000 out of 38174 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq 
($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. 
code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F 
+0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- 
diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK 
+DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No +WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv 
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD -VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju -SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END 
CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl -bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 
-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t +b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg +IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB 
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by 
+AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X +OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG 
-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy 
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + 
self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), 
None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not 
None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- 
a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -363,9 +371,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return 
bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return result + pass # fall through and return tcl object. elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -72,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -111,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. 
""" assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. 
""" if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -320,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . 
import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = 
ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . 
import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ 
b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + 
prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 
|| (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." 
is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py 
b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # 
xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: From noreply at buildbot.pypy.org Tue Nov 19 06:47:36 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 19 Nov 2013 06:47:36 +0100 (CET) Subject: [pypy-commit] pypy default: whoops Message-ID: <20131119054736.B07691C0225@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68233:f265ce8a2524 Date: 2013-11-19 07:48 +0200 http://bitbucket.org/pypy/pypy/changeset/f265ce8a2524/ Log: whoops diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst 
+++ b/pypy/doc/whatsnew-head.rst @@ -10,6 +10,6 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality -.. branch windows-packaging +.. branch: windows-packaging Package tk/tcl runtime with win32 From noreply at buildbot.pypy.org Tue Nov 19 09:31:44 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 19 Nov 2013 09:31:44 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: merge default Message-ID: <20131119083144.EED111C0330@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r68234:9f4093666c5d Date: 2013-11-19 09:28 +0100 http://bitbucket.org/pypy/pypy/changeset/9f4093666c5d/ Log: merge default diff too long, truncating to 2000 out of 4681 lines diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -33,7 +33,7 @@ $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + 
msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) 
#self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). 
diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff 
--git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) 
exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -347,6 +347,9 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model with self._lock: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.1' +version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.1.0' +release = '2.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -48,6 +48,6 @@ * send announcements to pypy-dev, python-list, python-announce, python-dev ... 
-* add a tag on jitviewer that corresponds to pypy release -* add a tag on codespeed that corresponds to pypy release +* add a tag on the pypy/jitviewer repo that corresponds to pypy release +* add a tag on the codespeed web site that corresponds to pypy release diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.1.0`_: the latest official release +* `Release 2.2.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.1.0`: http://pypy.org/download.html +.. _`Release 2.2.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.0.rst @@ -0,0 +1,89 @@ +======================================= +PyPy 2.2 - Incrementalism +======================================= + +We're pleased to announce PyPy 2.2, which targets version 2.7.3 of the Python +language. This release main highlight is the introduction of the incremental +garbage collector, sponsored by the `Raspberry Pi Foundation`_. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.2 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. 
+ +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. 
+ You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -4,26 +4,36 @@ Introduction ------------ -It is possible to compile a version of pypy-c that runs -fully "virtualized", i.e. where an external process controls all -input/output. Such a pypy-c is a secure sandbox: it is safe to run -any untrusted Python code with it. The Python code cannot see or -modify any local file except via interaction with the external -process. It is also impossible to do any other I/O or consume more -than some amount of RAM or CPU time or real time. This works with no -OS support at all - just ANSI C code generated in a careful way. It's -the kind of thing you could embed in a browser plug-in, for example -(it would be safe even if it wasn't run as a separate process, -actually). +PyPy offers sandboxing at a level similar to OS-level sandboxing (e.g. +SECCOMP_ on Linux), but implemented in a fully portable way. To use it, +a (regular, trusted) program launches a subprocess that is a special +sandboxed version of PyPy. 
This subprocess can run arbitrary untrusted +Python code, but all its input/output is serialized to a stdin/stdout +pipe instead of being directly performed. The outer process reads the +pipe and decides which commands are allowed or not (sandboxing), or even +reinterprets them differently (virtualization). A potential attacker +can have arbitrary code run in the subprocess, but cannot actually do +any input/output not controlled by the outer process. Additional +barriers are put to limit the amount of RAM and CPU time used. -For comparison, trying to plug CPython into a special virtualizing C -library is not only OS-specific, but unsafe, because one of the known -ways to segfault CPython could be used by an attacker to trick CPython -into issuing malicious system calls directly. The C code generated by +Note that this is very different from sandboxing at the Python language +level, i.e. placing restrictions on what kind of Python code the +attacker is allowed to run (why? read about pysandbox_). + +.. _SECCOMP: http://code.google.com/p/seccompsandbox/wiki/overview +.. _pysandbox: https://mail.python.org/pipermail/python-dev/2013-November/130132.html + +Another point of comparison: if we were instead to try to plug CPython +into a special virtualizing C library, we would get a result +that is not only OS-specific, but unsafe, because CPython can be +segfaulted (in many ways, all of them really, really obscure). +Given enough efforts, an attacker can turn almost any +segfault into a vulnerability. The C code generated by PyPy is not segfaultable, as long as our code generators are correct - -that's a lower number of lines of code to trust. For the paranoid, in -this case we also generate systematic run-time checks against buffer -overflows. +that's a lower number of lines of code to trust. 
For the paranoid, +PyPy translated with sandboxing also contains systematic run-time +checks (against buffer overflows for example) +that are normally only present in debugging versions. .. warning:: diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.2.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.2.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-2.2.rst @@ -1,5 +1,5 @@ ====================== -What's new in PyPy 2.1 +What's new in PyPy 2.2 ====================== .. this is a revision shortly after release-2.1-beta diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,140 +1,15 @@ -====================== -What's new in PyPy 2.1 -====================== +======================= +What's new in PyPy 2.2+ +======================= -.. this is a revision shortly after release-2.1-beta -.. startrev: 4eb52818e7c0 +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 -.. branch: sanitise_bytecode_dispatch -Make PyPy's bytecode dispatcher easy to read, and less reliant on RPython -magic. There is no functional change, though the removal of dead code leads -to many fewer tests to execute. +.. branch: release-2.2.x -.. branch: fastjson -Fast json decoder written in RPython, about 3-4x faster than the pure Python -decoder which comes with the stdlib +.. branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality -.. branch: improve-str2charp -Improve the performance of I/O writing up to 15% by using memcpy instead of -copying char-by-char in str2charp and get_nonmovingbuffer +.. branch: windows-packaging +Package tk/tcl runtime with win32 -.. branch: flowoperators -Simplify rpython/flowspace/ code by using more metaprogramming. Create -SpaceOperator class to gather static information about flow graph operations. - -.. branch: package-tk -Adapt package.py script to compile CFFI tk extension. 
Add a --without-tk switch -to optionally skip it. - -.. branch: distutils-cppldflags -Copy CPython's implementation of customize_compiler, dont call split on -environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. - -.. branch: precise-instantiate -When an RPython class is instantiated via an indirect call (that is, which -class is being instantiated isn't known precisely) allow the optimizer to have -more precise information about which functions can be called. Needed for Topaz. - -.. branch: ssl_moving_write_buffer - -.. branch: pythoninspect-fix -Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process -to start interactive prompt when the script execution finishes. This adds -new __pypy__.os.real_getenv call that bypasses Python cache and looksup env -in the underlying OS. Translatorshell now works on PyPy. - -.. branch: add-statvfs -Added os.statvfs and os.fstatvfs - -.. branch: statvfs_tests -Added some addition tests for statvfs. - -.. branch: ndarray-subtype -Allow subclassing ndarray, i.e. matrix - -.. branch: ndarray-sort -Implement ndarray in-place sorting (for numeric types, no non-native byte order) - -.. branch: pypy-pyarray -Implement much of numpy's c api in cpyext, allows (slow) access to ndarray -from c - -.. branch: kill-ootype - -.. branch: fast-slowpath -Added an abstraction for functions with a fast and slow path in the JIT. This -speeds up list.append() and list.pop(). - -.. branch: curses_fixes - -.. branch: foldable-getarrayitem-indexerror -Constant-fold reading out of constant tuples in PyPy. - -.. branch: mro-reorder-numpypy-str -No longer delegate numpy string_ methods to space.StringObject, in numpy -this works by kind of by accident. Support for merging the refactor-str-types -branch - -.. branch: kill-typesystem -Remove the "type system" abstraction, now that there is only ever one kind of -type system used. - -.. 
branch: kill-gen-store-back-in -Kills gen_store_back_in_virtualizable - should improve non-inlined calls by -a bit - -.. branch: dotviewer-linewidth -.. branch: reflex-support -.. branch: numpypy-inplace-op -.. branch: rewritten-loop-logging -.. branch: no-release-gil -.. branch: safe-win-mmap -.. branch: boolean-indexing-cleanup -.. branch: cpyext-best_base -.. branch: cpyext-int -.. branch: fileops2 - -.. branch: nobold-backtrace -Work on improving UnionError messages and stack trace displays. - -.. branch: improve-errors-again -More improvements and refactorings of error messages. - -.. branch: improve-errors-again2 -Unbreak tests in rlib. - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace. - -.. branch: file-support-in-rpython -make open() and friends rpython - -.. branch: incremental-gc -Added the new incminimark GC which performs GC in incremental steps - -.. branch: fast_cffi_list_init -fastpath for cffi.new("long[]") - -.. branch: remove-eval-frame -remove a pointless abstraction - -.. branch: jit-settrace -Allow the jit to continue running when sys.settrace() is active, necessary to -make coverage.py fast - -.. branch: remove-numpypy -Remove lib_pypy/numpypy in favor of external numpy fork - -.. branch: jit-counter -Tweak the jit counters: decay them at minor collection (actually -only every 32 minor collection is enough). Should avoid the "memory -leaks" observed in long-running processes, actually created by the -jit compiling more and more rarely executed paths. - -.. branch: fix-trace-jit -Fixed the usage of sys.settrace() with the JIT. Also made it so using -sys.settrace() doesn't cause the GIL to be released on every single iteration. - -.. 
branch: rordereddict -Implement OrderedDict in RPython diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -73,11 +73,11 @@ https://bitbucket.org/pypy/pypy/downloads/local.zip Then expand it into the base directory (base_dir) and modify your environment to reflect this:: - set PATH=\bin;%PATH% - set INCLUDE=\include;%INCLUDE% - set LIB=\lib;%LIB% + set PATH=\bin;\tcltk\bin;%PATH% + set INCLUDE=\include;\tcltk\include;%INCLUDE% + set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. Read on for more information. The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -109,11 +109,10 @@ The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://bzip.org/1.0.5/bzip2-1.0.5.tar.gz and extract it in -the base directory. Then compile:: - - cd bzip2-1.0.5 + svn export http://svn.python.org/projects/external/bzip2-1.0.6 + cd bzip2-1.0.6 nmake -f makefile.msc + copy bzip.dll \bzip.dll The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -122,8 +121,6 @@ wrapper is compiled when the module is imported for the first time. The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. - - The expat XML parser ~~~~~~~~~~~~~~~~~~~~ @@ -146,13 +143,33 @@ use the one distributed by ActiveState, or the one from cygwin. In both case the perl interpreter must be found on the PATH. -Get http://www.openssl.org/source/openssl-0.9.8k.tar.gz and extract it -in the base directory. Then compile:: - + svn export http://svn.python.org/projects/external/openssl-0.9.8y + cd openssl-0.9.8y perl Configure VC-WIN32 ms\do_ms.bat nmake -f ms\nt.mak install +TkInter module support +~~~~~~~~~~~~~~~~~~~~~~ + +Note that much of this is taken from the cpython build process. +Tkinter is imported via cffi, so the module is optional. 
To recreate the tcltk +directory found for the release script, create the dlls, libs, headers and +runtime by running:: + + svn export http://svn.python.org/projects/external/tcl-8.5.2.1 tcl85 + svn export http://svn.python.org/projects/external/tk-8.5.2.0 tk85 + cd tcl85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=0 INSTALLDIR=..\..\tcltk clean all + nmake -f makefile.vc DEBUG=0 INSTALLDIR=..\..\tcltk install + cd ..\..\tk85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 clean all + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 install + +Now you should have a tcktk\bin, tcltk\lib, and tcltk\include directory ready +for use. The release packaging script will pick up the tcltk runtime in the lib +directory and put it in the archive. + Using the mingw compiler ------------------------ diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -942,7 +942,8 @@ self.w_tmp_dir = self.space.wrap(tmp_dir) - foo_py = prefix.join('foo.py').write("pass") + foo_py = prefix.join('foo.py') + foo_py.write("pass") self.w_foo_py = self.space.wrap(str(foo_py)) def test_setup_bootstrap_path(self): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload @@ -43,6 +44,8 @@ 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name } + if sys.platform == 'win32': + interpleveldefs['getwinerror'] = 'cerrno.getwinerror' for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL", 
"RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]: diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py --- a/pypy/module/_cffi_backend/cerrno.py +++ b/pypy/module/_cffi_backend/cerrno.py @@ -39,3 +39,14 @@ def set_errno(space, errno): ec = get_errno_container(space) ec._cffi_saved_errno = errno + +# ____________________________________________________________ + + at unwrap_spec(code=int) +def getwinerror(space, code=-1): + from rpython.rlib.rwin32 import FormatError + if code == -1: + ec = get_errno_container(space) + code = ec._cffi_saved_LastError + message = FormatError(code) + return space.newtuple([space.wrap(code), space.wrap(message)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1086,7 +1086,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1094,7 +1096,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1104,7 +1108,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def 
test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -2687,6 +2693,16 @@ # res = GetLastError() assert res == 42 + # + SetLastError(2) + code, message = getwinerror() + assert code == 2 + assert message == "The system cannot find the file specified" + # + code, message = getwinerror(1155) + assert code == 1155 + assert message == ("No application is associated with the " + "specified file for this operation") def test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -2,7 +2,7 @@ import math from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize -from rpython.rlib import rfloat +from rpython.rlib import rfloat, runicode from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec @@ -373,7 +373,7 @@ return # help the annotator to know that we'll never go beyond # this point # - uchr = unichr(val) + uchr = runicode.code_to_unichr(val) # may be a surrogate pair again utf8_ch = unicodehelper.encode_utf8(self.space, uchr) builder.append(utf8_ch) return i diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -1,5 +1,4 @@ # -*- encoding: utf-8 -*- -import py, sys from 
pypy.module._pypyjson.interp_decoder import JSONDecoder def test_skip_whitespace(): @@ -16,9 +15,6 @@ class AppTest(object): spaceconfig = {"objspace.usemodules._pypyjson": True} - def setup_class(cls): - cls.w_run_on_16bit = cls.space.wrap(sys.maxunicode == 65535) - def test_raise_on_unicode(self): import _pypyjson raises(TypeError, _pypyjson.loads, u"42") @@ -183,8 +179,6 @@ raises(ValueError, """_pypyjson.loads('["extra comma",]')""") def test_unicode_surrogate_pair(self): - if self.run_on_16bit: - skip("XXX fix me or mark definitely skipped") import _pypyjson expected = u'z\U0001d120x' res = _pypyjson.loads('"z\\ud834\\udd20x"') diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -10,9 +10,11 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec, interpindirect2app -from pypy.interpreter.typedef import GetSetProperty, make_weakref_descr, TypeDef +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import ( + interp2app, interpindirect2app, unwrap_spec) +from pypy.interpreter.typedef import ( + GetSetProperty, TypeDef, make_weakref_descr) from pypy.module._file.interp_file import W_File from pypy.objspace.std.floatobject import W_FloatObject @@ -60,12 +62,12 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens=['comp_func'], + reds='auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): - if (not isinstance(arr1, W_ArrayBase) or - not isinstance(arr2, W_ArrayBase)): + if not (isinstance(arr1, W_ArrayBase) and isinstance(arr2, W_ArrayBase)): return 
space.w_NotImplemented if comp_op == EQ and arr1.len != arr2.len: return space.w_False @@ -236,9 +238,12 @@ raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) oldlen = self.len new = len(s) / self.itemsize + if not new: + return self.setlen(oldlen + new) cbuf = self._charbuf_start() - copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize), 0, len(s)) + copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize), + 0, len(s)) self._charbuf_stop() @unwrap_spec(w_f=W_File, n=int) @@ -268,8 +273,8 @@ def descr_tofile(self, space, w_f): """ tofile(f) - Write all items (as machine values) to the file object f. Also called as - write. + Write all items (as machine values) to the file object f. Also + called as write. """ w_s = self.descr_tostring(space) space.call_method(w_f, 'write', w_s) @@ -351,8 +356,8 @@ def descr_byteswap(self, space): """ byteswap() - Byteswap all items of the array. If the items in the array are not 1, 2, - 4, or 8 bytes in size, RuntimeError is raised. + Byteswap all items of the array. If the items in the array are + not 1, 2, 4, or 8 bytes in size, RuntimeError is raised. 
""" if self.itemsize not in [1, 2, 4, 8]: msg = "byteswap not supported for this array" @@ -434,7 +439,8 @@ return self.delitem(space, start, stop) def descr_delslice(self, space, w_start, w_stop): - self.descr_delitem(space, space.newslice(w_start, w_stop, space.w_None)) + self.descr_delitem(space, space.newslice(w_start, w_stop, + space.w_None)) def descr_add(self, space, w_other): raise NotImplementedError @@ -478,7 +484,7 @@ W_ArrayBase.typedef = TypeDef( 'array', __new__ = interp2app(w_array), - __module__ = 'array', + __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), @@ -534,7 +540,8 @@ class TypeCode(object): - def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'): + def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, + method='__int__'): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) @@ -547,7 +554,7 @@ if self.canoverflow: assert self.bytes <= rffi.sizeof(rffi.ULONG) if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ - self.unwrap == 'int_w': + self.unwrap == 'int_w': # Treat this type as a ULONG self.unwrap = 'bigint_w' self.canoverflow = False @@ -619,14 +626,15 @@ try: item = unwrap(w_item) except OperationError, e: - if isinstance(w_item, W_FloatObject): # Odd special case from cpython + if isinstance(w_item, W_FloatObject): + # Odd special case from cpython raise if mytype.method != '' and e.match(space, space.w_TypeError): try: item = unwrap(space.call_method(w_item, mytype.method)) except OperationError: msg = 'array item must be ' + mytype.unwrap[:-2] - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise operationerrfmt(space.w_TypeError, msg) else: raise if mytype.unwrap == 'bigint_w': @@ -681,14 +689,13 @@ some = 0 self.allocated = size + some if zero: - new_buffer = lltype.malloc(mytype.arraytype, - self.allocated, flavor='raw', - 
add_memory_pressure=True, - zero=True) + new_buffer = lltype.malloc( + mytype.arraytype, self.allocated, flavor='raw', + add_memory_pressure=True, zero=True) else: - new_buffer = lltype.malloc(mytype.arraytype, - self.allocated, flavor='raw', - add_memory_pressure=True) + new_buffer = lltype.malloc( + mytype.arraytype, self.allocated, flavor='raw', + add_memory_pressure=True) for i in range(min(size, self.len)): new_buffer[i] = self.buffer[i] else: @@ -882,9 +889,9 @@ if i >= j: return None oldbuffer = self.buffer - self.buffer = lltype.malloc(mytype.arraytype, - max(self.len - (j - i), 0), flavor='raw', - add_memory_pressure=True) + self.buffer = lltype.malloc( + mytype.arraytype, max(self.len - (j - i), 0), flavor='raw', + add_memory_pressure=True) if i: rffi.c_memcpy( rffi.cast(rffi.VOIDP, self.buffer), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -171,6 +171,9 @@ a = self.array('c') a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' and len(a) == 3 + a = self.array('c') + a.fromstring('') + assert not len(a) for t in 'bBhHiIlLfd': a = self.array(t) diff --git a/pypy/module/binascii/interp_crc32.py b/pypy/module/binascii/interp_crc32.py --- a/pypy/module/binascii/interp_crc32.py +++ b/pypy/module/binascii/interp_crc32.py @@ -1,17 +1,12 @@ from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint, intmask -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rzipfile import crc_32_tab +from rpython.rlib import rzipfile @unwrap_spec(data='bufferstr', oldcrc='truncatedint_w') def crc32(space, data, oldcrc=0): "Compute the CRC-32 incrementally." 
- crc = r_uint(rffi.cast(rffi.UINT, ~oldcrc)) # signed => 32-bit unsigned - - # in the following loop, we have always 0 <= crc < 2**32 - for c in data: - crc = crc_32_tab[(crc & 0xff) ^ ord(c)] ^ (crc >> 8) - - crc = ~intmask(rffi.cast(rffi.INT, crc)) # unsigned => 32-bit signed - return space.wrap(crc) + crc = rzipfile.crc32(data, r_uint(oldcrc)) + crc = rffi.cast(rffi.INT, crc) # unsigned => 32-bit signed + return space.wrap(intmask(crc)) diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.2.1-alpha0" +#define PYPY_VERSION "2.3.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -80,7 +80,7 @@ # u = interp_marshal.StringUnmarshaller(space, space.wrap(expected)) w_long = u.load_w_obj() - assert space.eq_w(w_long, w_obj) is True + assert space.eq_w(w_long, w_obj) for sign in [1L, -1L]: for i in range(100): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -36,10 +36,13 @@ return backstrides def getitem(self, index): - return self.dtype.getitem(self, index) + return self.dtype.itemtype.read(self, index, 0) + + def getitem_bool(self, index): + return self.dtype.itemtype.read_bool(self, index, 0) def setitem(self, index, value): - self.dtype.setitem(self, index, value) + self.dtype.itemtype.store(self, index, 0, value) def setslice(self, space, arr): impl = arr.implementation @@ -52,7 +55,7 @@ 
loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.itemtype.get_element_size() + return self.size // self.dtype.get_size() def get_storage_size(self): return self.size @@ -77,7 +80,7 @@ return scalar.Scalar(self.dtype, self.getitem(0)) return None - def get_view(self, orig_array, dtype, new_shape): + def get_view(self, space, orig_array, dtype, new_shape): strides, backstrides = support.calc_strides(new_shape, dtype, self.order) return SliceArray(self.start, strides, backstrides, new_shape, @@ -208,7 +211,15 @@ "field named %s not found" % idx)) return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): + space.isinstance_w(w_idx, space.w_slice)): + return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) + elif isinstance(w_idx, W_NDimArray) and \ + isinstance(w_idx.implementation, scalar.Scalar): + w_idx = w_idx.get_scalar_value().item(space) + if not space.isinstance_w(w_idx, space.w_int) and \ + not space.isinstance_w(w_idx, space.w_bool): + raise OperationError(space.w_IndexError, space.wrap( + "arrays used as indices must be of integer (or boolean) type")) return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) @@ -268,7 +279,7 @@ def create_dot_iter(self, shape, skip): r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), shape, skip) - return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) + return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] @@ -331,21 +342,24 @@ support.product(shape) > support.product(self.get_shape()): r = calculate_broadcast_strides(self.get_strides(), self.get_backstrides(), - self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], 
shape) - + self.get_shape(), shape, + backward_broadcast) + return iter.MultiDimViewIterator(self, self.start, + r[0], r[1], shape) if not require_index: return iter.ConcreteArrayIterator(self) - else: - if len(self.get_shape()) == 1: - return iter.OneDimViewIterator(self, self.dtype, self.start, - self.get_strides(), self.get_shape()) - else: - return iter.MultiDimViewIterator(self, self.dtype, self.start, - self.get_strides(), self.get_backstrides(), self.get_shape()) + if len(self.get_shape()) == 1: + return iter.OneDimViewIterator(self, self.start, + self.get_strides(), + self.get_shape()) + return iter.MultiDimViewIterator(self, self.start, + self.get_strides(), + self.get_backstrides(), + self.get_shape()) def fill(self, box): - self.dtype.fill(self.storage, box, 0, self.size) + self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), + box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): strides, backstrides = support.calc_strides(new_shape, self.dtype, @@ -399,7 +413,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.itemtype.get_element_size() + self.size = support.product(shape) * self.dtype.get_size() self.start = start self.orig_arr = orig_arr @@ -416,14 +430,16 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self.parent, self.dtype, - self.start, r[0], r[1], shape) + return iter.MultiDimViewIterator(self, self.start, + r[0], r[1], shape) if len(self.get_shape()) == 1: - return iter.OneDimViewIterator(self.parent, self.dtype, self.start, - self.get_strides(), self.get_shape()) - return iter.MultiDimViewIterator(self.parent, self.dtype, self.start, - self.get_strides(), - self.get_backstrides(), self.get_shape()) + return iter.OneDimViewIterator(self, self.start, + self.get_strides(), + self.get_shape()) + return iter.MultiDimViewIterator(self, self.start, + self.get_strides(), + 
self.get_backstrides(), + self.get_shape()) def set_shape(self, space, orig_array, new_shape): if len(self.get_shape()) < 2 or self.size == 0: diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -68,9 +68,15 @@ def transpose(self, _): return self - def get_view(self, orig_array, dtype, new_shape): + def get_view(self, space, orig_array, dtype, new_shape): scalar = Scalar(dtype) - scalar.value = self.value.convert_to(dtype) + if dtype.is_str_or_unicode(): + scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) + elif dtype.is_record_type(): + raise OperationError(space.w_NotImplementedError, space.wrap( + "viewing scalar as record not implemented")) + else: + scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) return scalar def get_real(self, orig_array): @@ -123,20 +129,24 @@ ) def descr_getitem(self, space, _, w_idx): + if space.isinstance_w(w_idx, space.w_tuple): + if space.len_w(w_idx) == 0: + return self.get_scalar_value() raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def getitem_index(self, space, idx): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def setitem_index(self, space, idx, w_val): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) + def set_shape(self, space, orig_array, new_shape): if not new_shape: return self diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- 
a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -1,6 +1,27 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.constants import * + +def byteorder_converter(space, new_order): + endian = new_order[0] + if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP): + ch = endian + if ch in ('b', 'B'): + endian = NPY_BIG + elif ch in ('l', 'L'): + endian = NPY_LITTLE + elif ch in ('n', 'N'): + endian = NPY_NATIVE + elif ch in ('i', 'I'): + endian = NPY_IGNORE + elif ch in ('s', 'S'): + endian = NPY_SWAP + else: + raise OperationError(space.w_ValueError, space.wrap( + "%s is an unrecognized byteorder" % new_order)) + return endian + + def clipmode_converter(space, w_mode): if space.is_none(w_mode): return NPY_RAISE @@ -19,6 +40,7 @@ raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) + def order_converter(space, w_order, default): if space.is_none(w_order): return default @@ -40,3 +62,25 @@ else: raise OperationError(space.w_TypeError, space.wrap( "order not understood")) + + +def multi_axis_converter(space, w_axis, ndim): + if space.is_none(w_axis): + return [True] * ndim + out = [False] * ndim + if not space.isinstance_w(w_axis, space.w_tuple): + w_axis = space.newtuple([w_axis]) + for w_item in space.fixedview(w_axis): + item = space.int_w(w_item) + axis = item + if axis < 0: + axis += ndim + if axis < 0 or axis >= ndim: + raise OperationError(space.w_ValueError, space.wrap( + "'axis' entry %d is out of bounds [-%d, %d)" % + (item, ndim, ndim))) + if out[axis]: + raise OperationError(space.w_ValueError, space.wrap( + "duplicate value in 'axis'")) + out[axis] = True + return out diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -106,16 +106,26 @@ args_w = [convert_to_array(space, w_arg) for w_arg in args_w] 
dtype = args_w[0].get_dtype() shape = args_w[0].get_shape()[:] - _axis = axis + ndim = len(shape) + orig_axis = axis if axis < 0: - _axis = len(shape) + axis + axis = ndim + axis + if ndim == 1 and axis != 0: + axis = 0 + if axis < 0 or axis >= ndim: + raise operationerrfmt(space.w_IndexError, + "axis %d out of bounds [0, %d)", orig_axis, ndim) for arr in args_w[1:]: + if len(arr.get_shape()) != ndim: + raise OperationError(space.w_ValueError, space.wrap( + "all the input arrays must have same number of dimensions")) for i, axis_size in enumerate(arr.get_shape()): - if len(arr.get_shape()) != len(shape) or (i != _axis and axis_size != shape[i]): + if i == axis: + shape[i] += axis_size + elif axis_size != shape[i]: raise OperationError(space.w_ValueError, space.wrap( - "all the input arrays must have same number of dimensions")) - elif i == _axis: - shape[i] += axis_size + "all the input array dimensions except for the " + "concatenation axis must match exactly")) a_dt = arr.get_dtype() if dtype.is_record_type() and a_dt.is_record_type(): # Record types must match @@ -129,19 +139,17 @@ space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) - if _axis < 0 or len(arr.get_shape()) <= _axis: - raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) # concatenate does not handle ndarray subtypes, it always returns a ndarray res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: - if arr.get_shape()[_axis] == 0: + if arr.get_shape()[axis] == 0: continue - chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, - arr.get_shape()[_axis]) + chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis], 1, + arr.get_shape()[axis]) Chunks(chunks).apply(space, res).implementation.setslice(space, arr) - axis_start += arr.get_shape()[_axis] + axis_start += arr.get_shape()[axis] return 
res @unwrap_spec(repeats=int) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -255,7 +255,10 @@ return convert_to_array(space, w_values) @unwrap_spec(decimals=int) - def descr_round(self, space, decimals=0): + def descr_round(self, space, decimals=0, w_out=None): + if not space.is_none(w_out): + raise OperationError(space.w_NotImplementedError, space.wrap( + "out not supported")) v = self.convert_to(self.get_dtype(space)) return self.get_dtype(space).itemtype.round(v, decimals) @@ -269,11 +272,19 @@ from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) if dtype.get_size() != self.get_dtype(space).get_size(): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) - raise OperationError(space.w_NotImplementedError, space.wrap( - "view not implelemnted yet")) + if dtype.is_str_or_unicode(): + return dtype.coerce(space, space.wrap(self.raw_str())) + elif dtype.is_record_type(): + raise OperationError(space.w_NotImplementedError, space.wrap( + "viewing scalar as record not implemented")) + else: + return dtype.itemtype.runpack_str(space, self.raw_str()) def descr_self(self, space): return self @@ -281,6 +292,9 @@ def descr_get_dtype(self, space): return self.get_dtype(space) + def descr_get_size(self, space): + return space.wrap(1) + def descr_get_itemsize(self, space): return self.get_dtype(space).descr_get_itemsize(space) @@ -407,6 +421,9 @@ def get_dtype(self, space): return self.arr.dtype + def raw_str(self): + return self.arr.dtype.itemtype.to_str(self) + class W_VoidBox(W_FlexibleBox): def descr_getitem(self, space, w_item): if space.isinstance_w(w_item, space.w_basestring): @@ -551,6 
+568,7 @@ copy = interp2app(W_GenericBox.descr_copy), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), + size = GetSetProperty(W_GenericBox.descr_get_size), itemsize = GetSetProperty(W_GenericBox.descr_get_itemsize), nbytes = GetSetProperty(W_GenericBox.descr_get_itemsize), shape = GetSetProperty(W_GenericBox.descr_get_shape), diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -3,20 +3,22 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, - interp_attrproperty, interp_attrproperty_w) + interp_attrproperty, interp_attrproperty_w) from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit +from pypy.module.micronumpy.conversion_utils import byteorder_converter from pypy.module.micronumpy.constants import * def decode_w_dtype(space, w_dtype): if space.is_none(w_dtype): return None - return space.interp_w(W_Dtype, - space.call_function(space.gettypefor(W_Dtype), w_dtype)) + return space.interp_w( + W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) + @jit.unroll_safe def dtype_agreement(space, w_arr_list, shape, out=None): @@ -33,11 +35,14 @@ out = base.W_NDimArray.from_shape(space, shape, dtype) return out + class W_Dtype(W_Root): - _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", "w_box_type", "byteorder", "float_type"] + _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", + "w_box_type", "byteorder", "size?", "float_type", + "fields?", "fieldnames?", "shape", "subdtype", "base"] def __init__(self, itemtype, num, kind, name, char, w_box_type, 
byteorder=NPY_NATIVE, - alternate_constructors=[], aliases=[], float_type=None, + size=1, alternate_constructors=[], aliases=[], float_type=None, fields=None, fieldnames=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num @@ -46,6 +51,7 @@ self.char = char self.w_box_type = w_box_type self.byteorder = byteorder + self.size = size self.alternate_constructors = alternate_constructors self.aliases = aliases self.float_type = float_type @@ -77,19 +83,6 @@ def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) - def getitem(self, arr, i): - item = self.itemtype.read(arr, i, 0) - return item - - def getitem_bool(self, arr, i): - return self.itemtype.read_bool(arr, i, 0) - - def setitem(self, arr, i, box): - self.itemtype.store(arr, i, 0, box) - - def fill(self, storage, box, start, stop): - self.itemtype.fill(storage, self.get_size(), box, start, stop, 0) - def is_int_type(self): return (self.kind == NPY_SIGNEDLTR or self.kind == NPY_UNSIGNEDLTR or self.kind == NPY_GENBOOLLTR) @@ -101,7 +94,7 @@ return self.kind == NPY_COMPLEXLTR def is_float_type(self): - return (self.kind == NPY_FLOATINGLTR or self.float_type is not None) + return self.kind == NPY_FLOATINGLTR or self.kind == NPY_COMPLEXLTR def is_bool_type(self): return self.kind == NPY_GENBOOLLTR @@ -122,7 +115,7 @@ return self.byteorder in (NPY_NATIVE, NPY_NATBYTE) def get_size(self): - return self.itemtype.get_element_size() + return self.size * self.itemtype.get_element_size() def get_name(self): if self.char == 'S': @@ -136,7 +129,7 @@ return space.wrap("dtype('%s')" % self.get_name()) def descr_get_itemsize(self, space): - return space.wrap(self.itemtype.get_element_size()) + return space.wrap(self.get_size()) def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -150,7 +143,7 @@ if basic == NPY_UNICODELTR: size >>= 2 endian = NPY_NATBYTE - elif size <= 1: + elif size // (self.size or 1) <= 1: endian = NPY_IGNORE else: endian = 
self.byteorder @@ -158,6 +151,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -196,7 +197,6 @@ self.fields = None else: self.fields = {} - ofs_and_items = [] size = 0 for key in space.listview(w_fields): value = space.getitem(w_fields, key) @@ -207,11 +207,11 @@ offset = space.int_w(space.getitem(value, space.wrap(1))) self.fields[space.str_w(key)] = offset, dtype - ofs_and_items.append((offset, dtype.itemtype)) - size += dtype.itemtype.get_element_size() + size += dtype.get_size() - self.itemtype = types.RecordType(ofs_and_items, size) - self.name = "void" + str(8 * self.itemtype.get_element_size()) + self.itemtype = types.RecordType() + self.size = size + self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): if self.fieldnames is None: @@ -232,6 +232,9 @@ raise break + def descr_get_hasobject(self, space): + return space.w_False + def descr_getitem(self, space, w_item): if self.fields is None: raise OperationError(space.w_KeyError, space.wrap( @@ -263,7 +266,7 @@ w_class = space.type(self) kind = self.kind - elemsize = self.itemtype.get_element_size() + elemsize = self.get_size() builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) version = space.wrap(3) @@ -308,11 +311,24 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) + @unwrap_spec(new_order=str) + def descr_newbyteorder(self, space, new_order=NPY_SWAP): + newendian = byteorder_converter(space, new_order) + endian = self.byteorder + if endian != NPY_IGNORE: + if newendian == NPY_SWAP: + endian = NPY_OPPBYTE if self.is_native() else 
NPY_NATBYTE + elif newendian != NPY_IGNORE: + endian = newendian + itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) + return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, + self.w_box_type, endian, size=self.size) + + def dtype_from_list(space, w_lst): lst_w = space.listview(w_lst) fields = {} offset = 0 - ofs_and_items = [] fieldnames = [] for w_elem in lst_w: size = 1 @@ -320,7 +336,7 @@ if space.len_w(w_elem) == 3: w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) if not base.issequence_w(space, w_shape): - w_shape = space.newtuple([w_shape,]) + w_shape = space.newtuple([w_shape]) else: w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) @@ -329,27 +345,31 @@ raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - ofs_and_items.append((offset, subdtype.itemtype)) - offset += subdtype.itemtype.get_element_size() * size + offset += subdtype.get_size() * size fieldnames.append(fldname) - itemtype = types.RecordType(ofs_and_items, offset) - return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, "void" + str(8 * itemtype.get_element_size()), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, - fieldnames=fieldnames) + itemtype = types.RecordType() + return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * offset * itemtype.get_element_size()), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + fields=fields, fieldnames=fieldnames, size=offset) + def dtype_from_dict(space, w_dict): raise OperationError(space.w_NotImplementedError, space.wrap( "dtype from dict")) + def dtype_from_spec(space, name): raise OperationError(space.w_NotImplementedError, space.wrap( "dtype from spec")) + def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None): # w_align and w_copy are necessary for 
pickling cache = get_dtype_cache(space) - if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or space.len_w(w_shape) > 0): + if w_shape is not None and (space.isinstance_w(w_shape, space.w_int) or + space.len_w(w_shape) > 0): subdtype = descr__new__(space, w_subtype, w_dtype, w_align, w_copy) assert isinstance(subdtype, W_Dtype) size = 1 @@ -360,8 +380,11 @@ dim = space.int_w(w_dim) shape.append(dim) size *= dim - return W_Dtype(types.VoidType(subdtype.itemtype.get_element_size() * size), NPY_VOID, NPY_VOIDLTR, "void" + str(8 * subdtype.itemtype.get_element_size() * size), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), shape=shape, subdtype=subdtype) + return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR, + "void" + str(8 * subdtype.get_size() * size), + NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, + size=subdtype.get_size() * size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -375,10 +398,10 @@ return cache.dtypes_by_name[name] except KeyError: pass - if name[0] in 'VSUc' or name[0] in '<>=' and name[1] in 'VSUc': + if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': return variable_dtype(space, name) raise OperationError(space.w_TypeError, space.wrap( - "data type %s not understood" % name)) + "data type %s not understood" % name)) elif space.isinstance_w(w_dtype, space.w_list): return dtype_from_list(space, w_dtype) elif space.isinstance_w(w_dtype, space.w_tuple): @@ -413,6 +436,7 @@ __reduce__ = interp2app(W_Dtype.descr_reduce), __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), @@ -430,12 +454,14 @@ isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), + hasobject = 
GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False def variable_dtype(space, name): - if name[0] in '<>=': + if name[0] in '<>=|': name = name[1:] char = name[0] if len(name) == 1: @@ -450,17 +476,17 @@ size = 1 if char == NPY_STRINGLTR: - itemtype = types.StringType(size) + itemtype = types.StringType() basename = 'string' num = NPY_STRING w_box_type = space.gettypefor(interp_boxes.W_StringBox) elif char == NPY_VOIDLTR: - itemtype = types.VoidType(size) + itemtype = types.VoidType() basename = 'void' num = NPY_VOID From noreply at buildbot.pypy.org Tue Nov 19 09:31:46 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 19 Nov 2013 09:31:46 +0100 (CET) Subject: [pypy-commit] pypy armhf-singlefloat: document and close about to be merged branch Message-ID: <20131119083146.244311C0330@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: armhf-singlefloat Changeset: r68235:83952b768a05 Date: 2013-11-19 09:30 +0100 http://bitbucket.org/pypy/pypy/changeset/83952b768a05/ Log: document and close about to be merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -13,3 +13,5 @@ .. branch: windows-packaging Package tk/tcl runtime with win32 +.. 
branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI From noreply at buildbot.pypy.org Tue Nov 19 09:31:47 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 19 Nov 2013 09:31:47 +0100 (CET) Subject: [pypy-commit] pypy default: merge armhf-singlefloat Message-ID: <20131119083147.631281C0330@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r68236:cc499f0c7d91 Date: 2013-11-19 09:30 +0100 http://bitbucket.org/pypy/pypy/changeset/cc499f0c7d91/ Log: merge armhf-singlefloat diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -13,3 +13,5 @@ .. branch: windows-packaging Package tk/tcl runtime with win32 +.. branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): non_float_locs = [] non_float_regs = [] float_locs = [] float_regs = [] stack_args = [] + singlefloats = None arglocs = self.arglocs argtypes = self.argtypes 
count = 0 # stack alignment counter on_stack = 0 - for arg in arglocs: - if arg.type != FLOAT: + for i in range(len(arglocs)): + argtype = INT + if i < len(argtypes) and argtypes[i] == 'S': + argtype = argtypes[i] + arg = arglocs[i] + if arg.is_float(): + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + else: # float argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': + # Singlefloat argument + if singlefloats is None: + singlefloats = [] + tgt = self.get_next_vfp(argtype) + if tgt: + singlefloats.append((arg, tgt)) + else: # Singlefloat argument that needs to go on the stack + # treated the same as a regular core register argument + count += 1 + on_stack += 1 + stack_args.append(arg) + else: if len(non_float_regs) < len(r.argument_regs): reg = r.argument_regs[len(non_float_regs)] non_float_locs.append(arg) @@ -249,18 +310,6 @@ count += 1 on_stack += 1 stack_args.append(arg) - else: - if len(float_regs) < len(r.vfp_argument_regs): - reg = r.vfp_argument_regs[len(float_regs)] - float_locs.append(arg) - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 # align the stack if count % 2 != 0: stack_args.append(None) @@ -275,13 +324,28 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers + remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) + if singlefloats: + for src, dest in singlefloats: + if src.is_float(): + assert 0, 'unsupported case' + if src.is_stack(): + # use special VLDR for 32bit + self.asm.regalloc_mov(src, r.ip) + src = r.ip + if src.is_imm(): + 
self.mc.gen_load_int(r.ip.value, src.value) + src = r.ip + if src.is_core_reg(): + self.mc.VMOV_cs(dest.value, src.value) # remap values stored in core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) - # remap values stored in vfp registers - remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) def load_result(self): resloc = self.resloc + if self.restype == 'S': + self.mc.VMOV_sc(resloc.value, r.s0.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -178,6 +178,30 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_sc(self, dest, src): + """move a single precision vfp register[src] to a core reg[dest]""" + self._VMOV_32bit(src, dest, to_arm_register=1) + + def VMOV_cs(self, dest, src): + """move a core register[src] to a single precision vfp + register[dest]""" + self._VMOV_32bit(dest, src, to_arm_register=0) + + def _VMOV_32bit(self, float_reg, core_reg, to_arm_register, cond=cond.AL): + """This instruction transfers the contents of a single-precision VFP + register to an ARM core register, or the contents of an ARM core + register to a single-precision VFP register. 
+ """ + instr = (cond << 28 + | 0xE << 24 + | to_arm_register << 20 + | ((float_reg >> 1) & 0xF) << 16 + | core_reg << 12 + | 0xA << 8 + | (float_reg & 0x1) << 7 + | 1 << 4) + self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): sz = 1 # for 64-bit mode instr = (cond << 28 @@ -198,8 +222,16 @@ self._VCVT(target, source, cond, 0, 1) def _VCVT(self, target, source, cond, opc2, sz): - D = 0 - M = 0 + # A8.6.295 + to_integer = (opc2 >> 2) & 1 + if to_integer: + D = target & 1 + target >>= 1 + M = (source >> 4) & 1 + else: + M = source & 1 + source >>= 1 + D = (target >> 4) & 1 op = 1 instr = (cond << 28 | 0xEB8 << 16 @@ -216,8 +248,8 @@ def _VCVT_single_double(self, target, source, cond, sz): # double_to_single = (sz == '1'); - D = 0 - M = 0 + D = target & 1 if sz else (target >> 4) & 1 + M = (source >> 4) & 1 if sz else source & 1 instr = (cond << 28 | 0xEB7 << 16 | 0xAC << 4 diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -55,12 +55,8 @@ type = FLOAT width = 2 * WORD - def get_single_precision_regs(self): - return [VFPRegisterLocation(i) for i in - [self.value * 2, self.value * 2 + 1]] - def __repr__(self): - return 'vfp%d' % self.value + return 'vfp(d%d)' % self.value def is_core_reg(self): return False @@ -74,6 +70,14 @@ def is_float(self): return True +class SVFPRegisterLocation(VFPRegisterLocation): + """Single Precission VFP Register""" + _immutable_ = True + width = WORD + type = 'S' + + def __repr__(self): + return 'vfp(s%d)' % self.value class ImmLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1102,17 +1102,16 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) - 
self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_float_to_int(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_int_to_float(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond emit_op_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') @@ -1147,15 +1146,14 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_f64_f32(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_f32_f64(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -1,8 +1,10 @@ from rpython.jit.backend.arm.locations import VFPRegisterLocation +from rpython.jit.backend.arm.locations import SVFPRegisterLocation from rpython.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] +svfpregisters = [SVFPRegisterLocation(i) for i in range(32)] [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15] = registers @@ -10,6 +12,10 @@ [d0, d1, d2, d3, d4, d5, 
d6, d7, d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters +# single precission VFP registers, 32-bit +for i in range(32): + globals()['s%d' % i] = svfpregisters[i] + # aliases for registers fp = r11 ip = r12 @@ -17,6 +23,7 @@ lr = r14 pc = r15 vfp_ip = d15 +svfp_ip = s31 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] @@ -27,6 +34,7 @@ callee_restored_registers = callee_resp + [pc] vfp_argument_regs = caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] +svfp_argument_regs = [globals()['s%i' % i] for i in range(16)] callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -22,7 +22,7 @@ supports_floats = True supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode - supports_singlefloats = not detect_hardfloat() + supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) From noreply at buildbot.pypy.org Tue Nov 19 09:56:33 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Tue, 19 Nov 2013 09:56:33 +0100 (CET) Subject: [pypy-commit] pypy default: deactivate test_multiprocessing until #1644 is resolved - buildslave hangs Message-ID: <20131119085633.8E36F1C0330@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: Changeset: r68237:4cb276ba3e4a Date: 2013-11-18 21:54 +0100 http://bitbucket.org/pypy/pypy/changeset/4cb276ba3e4a/ Log: deactivate test_multiprocessing until #1644 is resolved - buildslave hangs diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when 
https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # From noreply at buildbot.pypy.org Tue Nov 19 09:56:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Nov 2013 09:56:34 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in oberstet/pypy (pull request #200) Message-ID: <20131119085634.CAB121C0330@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68238:4369d6c2378e Date: 2013-11-19 09:56 +0100 http://bitbucket.org/pypy/pypy/changeset/4369d6c2378e/ Log: Merged in oberstet/pypy (pull request #200) deactivate test_multiprocessing until #1644 is resolved - buildslave hangs diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # From noreply at buildbot.pypy.org Tue Nov 19 14:51:10 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 19 Nov 2013 14:51:10 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: a branch where to support the buffer argument to ndarray() Message-ID: <20131119135110.B20421C237F@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68239:c1f8cadec802 Date: 2013-11-19 11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/c1f8cadec802/ Log: a branch where to support the buffer argument to ndarray() From noreply at buildbot.pypy.org Tue Nov 19 14:51:12 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 19 Nov 2013 14:51:12 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: first passing test: only buffers which implement get_raw_address are supported of course, because the others can be movable Message-ID: <20131119135112.17E961C237F@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68240:b9d813cc6c0a Date: 2013-11-19 12:21 +0100 http://bitbucket.org/pypy/pypy/changeset/b9d813cc6c0a/ Log: first passing test: only buffers which implement get_raw_address are supported of course, because the others can be movable diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1,3 +1,5 @@ +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ @@ -1065,13 +1067,27 @@ offset=0, w_strides=None, order='C'): from pypy.module.micronumpy.arrayimpl.concrete import 
ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_buffer)): + if (offset != 0 or not space.is_none(w_strides)): raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) + dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) + + if not space.is_none(w_buffer): + buf = space.buffer_w(w_buffer) + try: + raw_ptr = buf.get_raw_address() + except ValueError: + raise OperationError(space.w_TypeError, space.wrap( + "Only raw buffers are supported")) + if not shape: + raise OperationError(space.w_TypeError, space.wrap( + "numpy scalars from buffers not supported yet")) + storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) + if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): @@ -1091,8 +1107,6 @@ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
""" - from rpython.rtyper.lltypesystem import rffi - from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -218,7 +218,7 @@ assert get(1, 1) == 3 class AppTestNumArray(BaseNumpyAppTest): - spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii", "array"]) def w_CustomIndexObject(self, index): class CustomIndexObject(object): def __init__(self, index): @@ -2087,6 +2087,17 @@ a = np.ndarray([1], dtype=bool) assert a[0] == True + def test_ndarray_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + a = np.ndarray((3,), buffer=buf, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy From noreply at buildbot.pypy.org Tue Nov 19 14:51:13 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 19 Nov 2013 14:51:13 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: add a new implementation of arrays which keeps alive the original buffer Message-ID: <20131119135113.73BEC1C237F@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68241:193128b62e58 Date: 2013-11-19 14:50 +0100 http://bitbucket.org/pypy/pypy/changeset/193128b62e58/ Log: add a new implementation of arrays which keeps alive the original buffer diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ 
b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -392,7 +392,15 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + def base(self): + return self.orig_base + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): raise OperationError(space.w_ValueError, space.wrap( diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -49,11 +49,18 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, + w_subtype=None, w_base=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) - if owning: + if w_base is not None: + if owning: + raise OperationError(space.w_ValueError, + space.wrap("Cannot have owning=True when specifying a buffer")) + impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, + backstrides, storage, w_base) + elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, backstrides, storage=storage) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1086,7 +1086,8 @@ raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) + return 
W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_base=w_buffer) if not shape: return W_NDimArray.new_scalar(space, dtype) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2096,7 +2096,8 @@ a[1] = ord('a') a[2] = ord('r') assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] - + assert a.base is buf + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): From noreply at buildbot.pypy.org Tue Nov 19 15:34:30 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 19 Nov 2013 15:34:30 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: implement the offset param Message-ID: <20131119143430.BF5191C23D2@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68242:2187c37bb8b0 Date: 2013-11-19 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/2187c37bb8b0/ Log: implement the offset param diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1067,15 +1067,15 @@ offset=0, w_strides=None, order='C'): from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - if (offset != 0 or not space.is_none(w_strides)): - raise OperationError(space.w_NotImplementedError, - space.wrap("unsupported param")) - dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) if not space.is_none(w_buffer): + if (not space.is_none(w_strides)): + raise OperationError(space.w_NotImplementedError, + space.wrap("unsupported param")) + buf = space.buffer_w(w_buffer) try: raw_ptr = buf.get_raw_address() @@ -1086,6 +1086,7 @@ raise OperationError(space.w_TypeError, 
space.wrap( "numpy scalars from buffers not supported yet")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) + storage = rffi.ptradd(storage, offset) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, w_base=w_buffer) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2098,6 +2098,17 @@ assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] assert a.base is buf + def test_ndarray_from_buffer_and_offset(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*7) + buf[0] = 'X' + a = np.ndarray((3,), buffer=buf, offset=1, dtype='i2') + a[0] = ord('b') + a[1] = ord('a') + a[2] = ord('r') + assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00'] + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): From noreply at buildbot.pypy.org Tue Nov 19 15:34:32 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 19 Nov 2013 15:34:32 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: check the size of the buffer Message-ID: <20131119143432.0A2B01C2435@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68243:8d2b54062cfc Date: 2013-11-19 15:33 +0100 http://bitbucket.org/pypy/pypy/changeset/8d2b54062cfc/ Log: check the size of the buffer diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -22,6 +22,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy.constants import * def _find_shape(space, w_size, dtype): @@ -1085,6 +1086,10 @@ if not 
shape: raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) + totalsize = support.product(shape) * dtype.get_size() + if totalsize+offset > buf.getlength(): + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) storage = rffi.ptradd(storage, offset) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2109,6 +2109,16 @@ a[2] = ord('r') assert list(buf) == ['X', 'b', '\x00', 'a', '\x00', 'r', '\x00'] + def test_ndarray_from_buffer_out_of_bounds(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*10) # 20 bytes + info = raises(TypeError, "np.ndarray((11,), buffer=buf, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + info = raises(TypeError, "np.ndarray((5,), buffer=buf, offset=15, dtype='i2')") + assert str(info.value).startswith('buffer is too small') + + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): From noreply at buildbot.pypy.org Tue Nov 19 16:34:17 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 19 Nov 2013 16:34:17 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: test and fix Message-ID: <20131119153417.78A7B1C2FFE@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68244:8593deb29c94 Date: 2013-11-19 16:32 +0100 http://bitbucket.org/pypy/pypy/changeset/8593deb29c94/ Log: test and fix diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1093,6 +1093,7 @@ storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) storage = rffi.ptradd(storage, 
offset) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + w_subtype=w_subtype, w_base=w_buffer) if not shape: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2098,6 +2098,15 @@ assert list(buf) == ['b', '\x00', 'a', '\x00', 'r', '\x00'] assert a.base is buf + def test_ndarray_subclass_from_buffer(self): + import numpypy as np + import array + buf = array.array('c', ['\x00']*2*3) + class X(np.ndarray): + pass + a = X((3,), buffer=buf, dtype='i2') + assert type(a) is X + def test_ndarray_from_buffer_and_offset(self): import numpypy as np import array From noreply at buildbot.pypy.org Tue Nov 19 17:15:55 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Nov 2013 17:15:55 +0100 (CET) Subject: [pypy-commit] stmgc default: document the real reason why we can't use just any h_original and Message-ID: <20131119161555.CC0E31C147C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r549:b89b61f0df98 Date: 2013-11-19 17:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/b89b61f0df98/ Log: document the real reason why we can't use just any h_original and prevent things like stub->stub->stub on public addresses. diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -62,6 +62,7 @@ - prebuilt object, never modified 1 - other public object, never modified GT - outdated ptr to a more recent public copy +- stolen protected, made public some PRN Public stubs (have also a ref to one thread): - from stealing ptr (maybe to priv/prot) | 2 @@ -353,3 +354,6 @@ it is a predefined HASH value for this object. This is used by `stm_hash` which otherwise returns a hashed version of the ID of the object. + +DONT ASSUME THE H_ORIGINAL TO BE INITIALIZED. IT MAY HAVE E.G. +H_REVISION SET TO AN OLD, FREED OBJECT. 
diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -183,7 +183,8 @@ if (v & 2) goto follow_stub; - /* we update P_prev->h_revision as a shortcut */ + /* we update P_prev->h_revision as a shortcut + P_prev->P->v => P_prev->v */ /* XXX check if this really gives a worse performance than only doing this write occasionally based on a counter in d */ P_prev->h_revision = v; @@ -576,6 +577,7 @@ B = stmgc_duplicate_old(P); B->h_tid |= GCFLAG_BACKUP_COPY; B->h_tid &= ~GCFLAG_HAS_ID; + if (!(P->h_original) && (P->h_tid & GCFLAG_OLD)) { /* if P is old, it must be the original if P is young, it will create a shadow original later diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -89,21 +89,30 @@ } assert(obj->h_tid & GCFLAG_OLD); + if (stm_is_registered(obj)) { + /* prevents stub->stub->stub->... */ + /* only increment refcount: */ + stm_register_integer_address((intptr_t)obj); + return (intptr_t)obj; + } + spinlock_acquire(d->public_descriptor->collection_lock, 'P'); /* it must have a h_original */ gcptr orig; - if (obj->h_original == 0 || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + if ((!obj->h_original) || (obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { orig = obj; } else { orig = (gcptr)obj->h_original; } - if ((orig->h_tid & (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) - == (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) { - /* public is not enough as public stubs may get replaced - by the protected object they point to, if they are in the - same thread (I think...) */ + if ((orig->h_tid & GCFLAG_PUBLIC) + && (obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + /* we can't just use *any* public original because their + h_revision is not kept up-to-date during major collections. + Meaning it can point to some long gone object. + Prebuilt originals, however, always get visited in major + collections. 
*/ result = (intptr_t)orig; } else { @@ -233,10 +242,11 @@ /* must create shadow original object XXX: or use backup, if exists */ gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); - memcpy(O, p, stmgc_size(p)); /* at least major collections - depend on some content of id_copy. - remove after fixing that XXX */ + memcpy(O, p, sizeof(struct stm_object_s)); + O->h_tid |= GCFLAG_OLD; + assert(O->h_original == 0); + assert(O->h_revision = -1); /* debugging */ p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; @@ -245,6 +255,7 @@ gcptr B = (gcptr)p->h_revision; /* not stolen already: */ assert(!(B->h_tid & GCFLAG_PUBLIC)); + assert(!B->h_original); B->h_original = (revision_t)O; } diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -37,6 +37,13 @@ } } +static void check_consistent(gcptr obj) { + if ((obj->h_revision & 3) == 2) + assert(stm_pointer_equal(obj, (gcptr)(obj->h_revision-2))); + else if ((obj->h_revision & 1) == 0) + assert(stm_pointer_equal(obj, (gcptr)(obj->h_revision))); +} + /***** Support code *****/ @@ -217,6 +224,22 @@ /***** registering of small stubs as integer addresses *****/ +_Bool stm_is_registered(gcptr obj) +{ + wlog_t *found; + _Bool res = 0; + + stmgcpage_acquire_global_lock(); + /* find and increment refcount; or insert */ + G2L_FIND(registered_objs, obj, found, goto finish); + found->val = (gcptr)(((revision_t)found->val) + 1); + goto finish; + res = 1; + finish: + stmgcpage_release_global_lock(); + return res; +} + void stm_register_integer_address(intptr_t adr) { /* needs to be inevitable! 
*/ wlog_t *found; @@ -253,6 +276,7 @@ /* become inevitable because we would have to re-register them on abort, but make sure only to re-register if not registered in the same aborted transaction (XXX) */ + /* (obj will not move) */ stm_become_inevitable("stm_unregister_integer_address()"); stmgcpage_acquire_global_lock(); @@ -279,6 +303,10 @@ static gcptr copy_over_original(gcptr obj, gcptr id_copy) { + /* no obj->h_revision = obj->h_original = id_copy */ + assert(!((obj->h_revision <= ((revision_t)id_copy + 2)) && + (obj->h_revision >= ((revision_t)id_copy)))); + assert(obj != id_copy); assert(id_copy == (gcptr)obj->h_original); assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ @@ -540,11 +568,6 @@ == (GCFLAG_MARKED|GCFLAG_VISITED|GCFLAG_PUBLIC)); continue; } - /* else if (R->h_original == 0) { */ - /* /\* the obj is an original and will therefore survive: *\/ */ - /* gcptr V = visit_public(R, NULL); */ - /* assert(V == R); */ - /* } */ else { assert(R->h_tid & GCFLAG_SMALLSTUB); /* only case for now */ /* make sure R stays valid: */ diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -186,6 +186,9 @@ /* priv_from_prot's backup->h_originals already point to id_obj */ + assert(IMPLIES(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED, + ((gcptr)obj->h_revision)->h_original + == obj->h_original)); } else { /* make a copy of it outside */ diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -83,6 +83,7 @@ obj->h_original = (revision_t)stub; stub->h_original = 0; /* stub_malloc does not set to 0... */ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + assert(!(((gcptr)obj->h_revision)->h_original)); ((gcptr)obj->h_revision)->h_original = (revision_t)stub; } } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -41,7 +41,7 @@ called on the result (push roots!) */ intptr_t stm_allocate_public_integer_address(gcptr); void stm_unregister_integer_address(intptr_t); /* push roots too! 
*/ - +_Bool stm_is_registered(gcptr); /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); From noreply at buildbot.pypy.org Tue Nov 19 17:17:40 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Nov 2013 17:17:40 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: import stmgc Message-ID: <20131119161740.63AC01C147C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68245:6c38f0067571 Date: 2013-11-19 17:16 +0100 http://bitbucket.org/pypy/pypy/changeset/6c38f0067571/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -184,7 +184,8 @@ if (v & 2) goto follow_stub; - /* we update P_prev->h_revision as a shortcut */ + /* we update P_prev->h_revision as a shortcut + P_prev->P->v => P_prev->v */ /* XXX check if this really gives a worse performance than only doing this write occasionally based on a counter in d */ P_prev->h_revision = v; @@ -577,6 +578,7 @@ B = stmgc_duplicate_old(P); B->h_tid |= GCFLAG_BACKUP_COPY; B->h_tid &= ~GCFLAG_HAS_ID; + if (!(P->h_original) && (P->h_tid & GCFLAG_OLD)) { /* if P is old, it must be the original if P is young, it will create a shadow original later diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -90,21 +90,30 @@ } assert(obj->h_tid & GCFLAG_OLD); + if (stm_is_registered(obj)) { + /* prevents stub->stub->stub->... 
*/ + /* only increment refcount: */ + stm_register_integer_address((intptr_t)obj); + return (intptr_t)obj; + } + spinlock_acquire(d->public_descriptor->collection_lock, 'P'); /* it must have a h_original */ gcptr orig; - if (obj->h_original == 0 || obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + if ((!obj->h_original) || (obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { orig = obj; } else { orig = (gcptr)obj->h_original; } - if ((orig->h_tid & (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) - == (GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL)) { - /* public is not enough as public stubs may get replaced - by the protected object they point to, if they are in the - same thread (I think...) */ + if ((orig->h_tid & GCFLAG_PUBLIC) + && (obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + /* we can't just use *any* public original because their + h_revision is not kept up-to-date during major collections. + Meaning it can point to some long gone object. + Prebuilt originals, however, always get visited in major + collections. */ result = (intptr_t)orig; } else { @@ -234,10 +243,11 @@ /* must create shadow original object XXX: or use backup, if exists */ gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); - memcpy(O, p, stmgc_size(p)); /* at least major collections - depend on some content of id_copy. 
- remove after fixing that XXX */ + memcpy(O, p, sizeof(struct stm_object_s)); + O->h_tid |= GCFLAG_OLD; + assert(O->h_original == 0); + assert(O->h_revision = -1); /* debugging */ p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; @@ -246,6 +256,7 @@ gcptr B = (gcptr)p->h_revision; /* not stolen already: */ assert(!(B->h_tid & GCFLAG_PUBLIC)); + assert(!B->h_original); B->h_original = (revision_t)O; } diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -38,6 +38,13 @@ } } +static void check_consistent(gcptr obj) { + if ((obj->h_revision & 3) == 2) + assert(stm_pointer_equal(obj, (gcptr)(obj->h_revision-2))); + else if ((obj->h_revision & 1) == 0) + assert(stm_pointer_equal(obj, (gcptr)(obj->h_revision))); +} + /***** Support code *****/ @@ -218,6 +225,22 @@ /***** registering of small stubs as integer addresses *****/ +_Bool stm_is_registered(gcptr obj) +{ + wlog_t *found; + _Bool res = 0; + + stmgcpage_acquire_global_lock(); + /* find and increment refcount; or insert */ + G2L_FIND(registered_objs, obj, found, goto finish); + found->val = (gcptr)(((revision_t)found->val) + 1); + goto finish; + res = 1; + finish: + stmgcpage_release_global_lock(); + return res; +} + void stm_register_integer_address(intptr_t adr) { /* needs to be inevitable! 
*/ wlog_t *found; @@ -254,6 +277,7 @@ /* become inevitable because we would have to re-register them on abort, but make sure only to re-register if not registered in the same aborted transaction (XXX) */ + /* (obj will not move) */ stm_become_inevitable("stm_unregister_integer_address()"); stmgcpage_acquire_global_lock(); @@ -280,6 +304,10 @@ static gcptr copy_over_original(gcptr obj, gcptr id_copy) { + /* no obj->h_revision = obj->h_original = id_copy */ + assert(!((obj->h_revision <= ((revision_t)id_copy + 2)) && + (obj->h_revision >= ((revision_t)id_copy)))); + assert(obj != id_copy); assert(id_copy == (gcptr)obj->h_original); assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ @@ -541,11 +569,6 @@ == (GCFLAG_MARKED|GCFLAG_VISITED|GCFLAG_PUBLIC)); continue; } - /* else if (R->h_original == 0) { */ - /* /\* the obj is an original and will therefore survive: *\/ */ - /* gcptr V = visit_public(R, NULL); */ - /* assert(V == R); */ - /* } */ else { assert(R->h_tid & GCFLAG_SMALLSTUB); /* only case for now */ /* make sure R stays valid: */ diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -187,6 +187,9 @@ /* priv_from_prot's backup->h_originals already point to id_obj */ + assert(IMPLIES(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED, + ((gcptr)obj->h_revision)->h_original + == obj->h_original)); } else { /* make a copy of it outside */ diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -68677625f2be +b89b61f0df98 diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -84,6 +84,7 @@ obj->h_original = (revision_t)stub; stub->h_original = 
0; /* stub_malloc does not set to 0... */ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + assert(!(((gcptr)obj->h_revision)->h_original)); ((gcptr)obj->h_revision)->h_original = (revision_t)stub; } } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -42,7 +42,7 @@ called on the result (push roots!) */ intptr_t stm_allocate_public_integer_address(gcptr); void stm_unregister_integer_address(intptr_t); /* push roots too! */ - +_Bool stm_is_registered(gcptr); /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); From noreply at buildbot.pypy.org Tue Nov 19 19:34:55 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 19 Nov 2013 19:34:55 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Merge default Message-ID: <20131119183455.24BAF1C147C@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68246:4d92d4a9ffe3 Date: 2013-11-19 19:33 +0100 http://bitbucket.org/pypy/pypy/changeset/4d92d4a9ffe3/ Log: Merge default diff too long, truncating to 2000 out of 28531 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." 
+ @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create 
a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for 
msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ 
b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git 
a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic 
library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. 
It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -320,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . 
import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = 
ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . 
import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ 
b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + 
prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 
|| (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." 
is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py 
b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # 
xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = 
self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -413,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -431,10 +451,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +486,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, 
tp_ptr) else: @@ -476,6 +505,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +530,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. 
It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from . import core -from .core import * -from . 
import lib -from .lib import * - -from __builtin__ import bool, int, long, float, complex, object, unicode, str - -from .core import round, abs, max, min - -__version__ = '1.7.0' - -__all__ = ['__version__'] -__all__ += core.__all__ -__all__ += lib.__all__ - -#import sys -#sys.modules.setdefault('numpy', sys.modules['numpypy']) diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from . import multiarray -from . import umath -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . import shape_base -from .shape_base import * - -from .fromnumeric import amax as max, amin as min, \ - round_ as round -from .numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -import multiarray as mu -import umath as um -from numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - 
return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(xrange(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: - ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - rcount = _count_reduce_items(arr, axis) - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - arr = asanyarray(a) - - # First compute the mean, saving 'rcount' for reuse later - if dtype is None and arr.dtype.kind in ['b','u','i']: - arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True) - else: - arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True) - rcount = _count_reduce_items(arr, axis) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide(arrmean, rcount, - out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean / float(rcount) - - # arr - arrmean - x = arr - arrmean - - # (arr - arrmean) ** 2 - if arr.dtype.kind == 'c': - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - - # add.reduce((arr - arrmean) ** 2, axis) - ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof) - if not keepdims and isinstance(rcount, 
mu.ndarray): - rcount = rcount.squeeze(axis=axis) - rcount -= ddof - if isinstance(ret, mu.ndarray): - ret = um.true_divide(ret, rcount, - out=ret, casting='unsafe', subok=False) - else: - ret = ret / float(rcount) - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): From noreply at buildbot.pypy.org Tue Nov 19 20:10:54 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 19 Nov 2013 20:10:54 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Forgot to solve this merge conflict Message-ID: <20131119191054.9AA051C0225@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68247:0559ff165dad Date: 2013-11-19 20:10 +0100 http://bitbucket.org/pypy/pypy/changeset/0559ff165dad/ Log: Forgot to solve this merge conflict diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -313,9 +313,6 @@ self.w_flags = W_FlagsObject(self) return self.w_flags - def descr_dtype(self, space): - return self._get_dtype(space) - class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") @@ -567,9 +564,6 @@ conjugate = interp2app(W_GenericBox.descr_conjugate), astype = interp2app(W_GenericBox.descr_astype), view = interp2app(W_GenericBox.descr_view), -<<<<<<< local - dtype = GetSetProperty(W_GenericBox.descr_dtype) -======= squeeze = interp2app(W_GenericBox.descr_self), copy = interp2app(W_GenericBox.descr_copy), @@ -582,7 +576,6 @@ ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = GetSetProperty(W_GenericBox.descr_self), flags = GetSetProperty(W_GenericBox.descr_get_flags), ->>>>>>> other ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, From noreply at buildbot.pypy.org Tue Nov 19 21:09:07 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 19 Nov 2013 
21:09:07 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: sum doesn't exist in numpypy Message-ID: <20131119200907.816361C147C@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68248:b5c80215f7e5 Date: 2013-11-19 21:08 +0100 http://bitbucket.org/pypy/pypy/changeset/b5c80215f7e5/ Log: sum doesn't exist in numpypy diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -374,7 +374,7 @@ raises(TypeError, log, a, out=c) def test___array_prepare__reduce(self): - from numpypy import ndarray, array, sum, ones, add + from numpypy import ndarray, array, ones, add class with_prepare(ndarray): def __array_prepare__(self, arr, context): x = array(arr).view(type=with_prepare) @@ -382,7 +382,7 @@ print 'called_prepare',arr return x a = ones(2).view(type=with_prepare) - x = sum(a) + x = a.sum() assert type(x) == with_prepare assert x.shape == () # reduce functions do not call prepare, is this a numpy 'feature'? 
@@ -391,6 +391,6 @@ assert type(x) == with_prepare assert not getattr(x, 'called_prepare',False) a = ones((2,3)).view(type=with_prepare) - x = sum(a, axis=0) + x = a.sum(axis=0) assert type(x) == with_prepare assert not getattr(x, 'called_prepare',False) From noreply at buildbot.pypy.org Wed Nov 20 01:47:51 2013 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 20 Nov 2013 01:47:51 +0100 (CET) Subject: [pypy-commit] pypy default: backout afb227c, breaks everything else and doesn't fix freebsd Message-ID: <20131120004751.D41301C1DBD@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68249:55b6a37713d9 Date: 2013-11-19 19:46 -0500 http://bitbucket.org/pypy/pypy/changeset/55b6a37713d9/ Log: backout afb227c, breaks everything else and doesn't fix freebsd diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,7 +4,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -80,38 +79,6 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY - _t_opened = {} - - def t_dlopen(name): - # for direct execution: can't use the regular way on FreeBSD :-( - # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html - import ctypes - if name: - name = rffi.charp2str(name) - else: - name = None - try: - res = ctypes.cdll.LoadLibrary(name) - except OSError, e: - raise DLOpenError(str(e)) - h = rffi.cast(rffi.VOIDP, res._handle) - _t_opened[rffi.cast(rffi.LONG, h)] = res - return h - - def t_dlclose(handle): - _t_opened.pop(rffi.cast(rffi.LONG, handle)) - return rffi.cast(rffi.INT, 0) - - def t_dldym(handle, name): - import ctypes - lib = _t_opened[rffi.cast(rffi.LONG, handle)] - try: - symbol = 
lib[name] - except AttributeError: - raise KeyError(name) - res = ctypes.cast(symbol, ctypes.c_void_p) - return rffi.cast(rffi.VOIDP, res.value or 0) - def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -124,8 +91,6 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ - if not we_are_translated(): - return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -139,16 +104,11 @@ raise DLOpenError(err) return res - def dlclose(handle): - if not we_are_translated(): - return t_dlclose(handle) - return c_dlclose(handle) + dlclose = c_dlclose def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ - if not we_are_translated(): - return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,4 +21,3 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) - dlclose(lib) From noreply at buildbot.pypy.org Wed Nov 20 02:04:57 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 02:04:57 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20131120010457.CE6931C237F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68250:fb005e046dc4 Date: 2013-11-19 23:41 +0100 http://bitbucket.org/pypy/pypy/changeset/fb005e046dc4/ Log: hg merge default diff too long, truncating to 2000 out of 47956 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + 
pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -1780,7 +1780,19 @@ # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" - if argument_values is not action.default: + + # XXX PyPy bug-to-bug compatibility: "is" on primitive types + # is not consistent in CPython. We'll assume it is close + # enough for ints (which is true only for "small ints"), but + # for floats and longs and complexes we'll go for the option + # of forcing "is" to say False, like it usually does on + # CPython. A fix is pending on CPython trunk + # (http://bugs.python.org/issue18943) but that might change + # the details of the semantics and so not be applied to 2.7. + # See the line AA below. 
+ + if (argument_values is not action.default or + type(argument_values) in (float, long, complex)): # AA seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F 
+0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- 
diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK 
+DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No +WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv 
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD -VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju -SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END 
CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl -bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 
-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t +b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg +IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB 
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by 
+AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X +OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG 
-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy 
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + 
self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), 
None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ 
-111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -127,26 +127,39 @@ overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. 
""" - if [hex, bytes, bytes_le, fields, int].count(None) != 4: - raise TypeError('need one of hex, bytes, bytes_le, fields, or int') if hex is not None: + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: raise ValueError('badly formed hexadecimal UUID string') int = long(hex, 16) - if bytes_le is not None: + elif bytes_le is not None: + if bytes is not None or fields is not None or int is not None: + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] + bytes_le[8:]) - if bytes is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + elif bytes is not None: + if fields is not None or int is not None: + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') int = (struct.unpack('>Q', bytes[:8])[0] << 64 | struct.unpack('>Q', bytes[8:])[0]) - if fields is not None: + elif fields is not None: + if int is not None: + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, @@ -166,9 +179,12 @@ clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low int = ((time_low << 96L) | (time_mid << 80L) | (time_hi_version << 64L) | (clock_seq << 48L) | node) - if int is not None: + elif int is not None: if not 0 <= int < 1<<128L: raise ValueError('int is out of range (need a 128-bit value)') + else: + raise TypeError('one 
of hex, bytes, bytes_le, fields,' + ' or int need to be not None') if version is not None: if not 1 <= version <= 5: raise ValueError('illegal version number') @@ -178,7 +194,7 @@ # Set the version number. int &= ~(0xf000 << 64L) int |= version << 76L - self.__dict__['int'] = int + object.__setattr__(self, 'int', int) def __cmp__(self, other): if isinstance(other, UUID): diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1,6 +1,9 @@ """Reimplementation of the standard extension module '_curses' using cffi.""" import sys +if sys.platform == 'win32': + #This module does not exist in windows + raise ImportError('No module named _curses') from functools import wraps from cffi import FFI diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py 
--- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -363,9 +371,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -22,6 +22,7 @@ READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE EXCEPTION = tklib.TCL_EXCEPTION +DONT_WAIT = tklib.TCL_DONT_WAIT def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -4,7 +4,23 @@ from . 
import TclError from .tclobj import TclObject, FromObj, AsObj, TypeCache +import contextlib import sys +import threading +import time + + +class _DummyLock(object): + "A lock-like object that does not do anything" + def acquire(self): + pass + def release(self): + pass + def __enter__(self): + pass + def __exit__(self, *exc): + pass + def varname_converter(input): if isinstance(input, TclObject): @@ -37,17 +53,18 @@ def PythonCmd(clientData, interp, argc, argv): self = tkffi.from_handle(clientData) assert self.app.interp == interp - try: - args = [tkffi.string(arg) for arg in argv[1:argc]] - result = self.func(*args) - obj = AsObj(result) - tklib.Tcl_SetObjResult(interp, obj) - except: - self.app.errorInCmd = True - self.app.exc_info = sys.exc_info() - return tklib.TCL_ERROR - else: - return tklib.TCL_OK + with self.app._tcl_lock_released(): + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK @tkffi.callback("Tcl_CmdDeleteProc") def PythonCmdDelete(clientData): @@ -58,6 +75,8 @@ class TkApp(object): + _busywaitinterval = 0.02 # 20ms. + def __new__(cls, screenName, baseName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: @@ -73,6 +92,12 @@ self.quitMainLoop = False self.errorInCmd = False + if not self.threaded: + # TCL is not thread-safe, calls needs to be serialized. + self._tcl_lock = threading.Lock() + else: + self._tcl_lock = _DummyLock() + self._typeCache = TypeCache() self._commands = {} @@ -133,6 +158,13 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise RuntimeError("Calling Tcl from different appartment") + @contextlib.contextmanager + def _tcl_lock_released(self): + "Context manager to temporarily release the tcl lock." 
+ self._tcl_lock.release() + yield + self._tcl_lock.acquire() + def loadtk(self): # We want to guard against calling Tk_Init() multiple times err = tklib.Tcl_Eval(self.interp, "info exists tk_version") @@ -159,22 +191,25 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) - if not res: - self.raiseTclError() - assert self._wantobjects - return FromObj(self, res) + with self._tcl_lock: + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) def _setvar(self, name1, value, global_only=False): name1 = varname_converter(name1) + # XXX Acquire tcl lock??? newval = AsObj(value) flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, - newval, flags) - if not res: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() def _unsetvar(self, name1, name2=None, global_only=False): name1 = varname_converter(name1) @@ -183,9 +218,10 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() def getvar(self, name1, name2=None): return self._var_invoke(self._getvar, name1, name2) @@ -219,9 +255,10 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, - clientData, _CommandData.PythonCmdDelete) + with self._tcl_lock: + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + 
clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +266,8 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + with self._tcl_lock: + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) if res == -1: raise TclError("can't delete Tcl command") @@ -256,11 +294,12 @@ tklib.Tcl_IncrRefCount(obj) objects[i] = obj - res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() - else: - result = self._callResult() + with self._tcl_lock: + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() finally: for obj in objects: if obj: @@ -280,17 +319,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): if isinstance(arg, tuple): @@ -375,7 +416,10 @@ if self.threaded: result = tklib.Tcl_DoOneEvent(0) else: - raise NotImplementedError("TCL configured without threads") + with self._tcl_lock: + result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT) + if result == 0: + 
time.sleep(self._busywaitinterval) if result < 0: break diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return result + pass # fall through and return tcl object. elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -1,6 +1,7 @@ # C bindings with libtcl and libtk. from cffi import FFI +import sys tkffi = FFI() @@ -18,6 +19,8 @@ #define TCL_EVAL_DIRECT ... #define TCL_EVAL_GLOBAL ... +#define TCL_DONT_WAIT ... + typedef unsigned short Tcl_UniChar; typedef ... Tcl_Interp; typedef ...* Tcl_ThreadId; @@ -69,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -102,6 +106,25 @@ int Tk_GetNumMainWindows(); """) +# XXX find a better way to detect paths +# XXX pick up CPPFLAGS and LDFLAGS and add to these paths? 
+if sys.platform.startswith("openbsd"): + incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] + linklibs = ['tk85', 'tcl85'] + libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] +else: + incdirs=['/usr/include/tcl'] + linklibs=['tcl', 'tk'] + libdirs = [] + tklib = tkffi.verify(""" #include #include @@ -109,6 +132,7 @@ char *get_tk_version() { return TK_VERSION; } char *get_tcl_version() { return TCL_VERSION; } """, -include_dirs=['/usr/include/tcl'], -libraries=['tcl', 'tk'], +include_dirs=incdirs, +libraries=linklibs, +library_dirs = libdirs ) diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7" -__version_info__ = (0, 7) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -54,12 +55,14 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . 
import __version__ - assert backend.__version__ == __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -73,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -94,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -108,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! 
+ key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -143,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -279,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -319,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -341,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -371,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -403,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -443,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ b/lib_pypy/cffi/commontypes.py @@ -30,7 +30,9 @@ elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) else: - assert commontype != result + if commontype == result: + raise api.FFIError("Unsupported type: %r. Please file a bug " + "if you think it should be." 
% (commontype,)) result = resolve_common_type(result) # recursively assert isinstance(result, model.BaseTypeByIdentity) _CACHE[commontype] = result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, 
model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -290,13 +278,26 @@ # assume a primitive type. get it from .names, but reduce # synonyms to a single chosen combination names = list(type.names) - if names == ['signed'] or names == ['unsigned']: - names.append('int') - if names[0] == 'signed' and names != ['signed', 'char']: - names.pop(0) - if (len(names) > 1 and names[-1] == 'int' - and names != ['unsigned', 'int']): - names.pop() + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names ident = ' '.join(names) if ident == 'void': return model.void_type @@ -318,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -352,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) 
== ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -478,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -498,10 +499,10 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# - raise api.FFIError("unsupported non-constant or " - "not immediately constant expression") + raise api.FFIError("unsupported expression: expected a " + "simple numeric constant") def _build_enum_type(self, explicit_name, decls): if decls is not None: diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. 
The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # From 
noreply at buildbot.pypy.org Wed Nov 20 02:04:59 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 02:04:59 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Adapt imports / test class names. Message-ID: <20131120010459.12E781C237F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68251:ccaea346a3e8 Date: 2013-11-20 01:56 +0100 http://bitbucket.org/pypy/pypy/changeset/ccaea346a3e8/ Log: Adapt imports / test class names. diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -10,7 +10,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi, llmemory, llgroup from rpython.rtyper.lltypesystem.ll2ctypes import (force_cast, get_ctypes_type, lltype2ctypes, ctypes2lltype) -from rpython.rtyper.lltypesystem.rtuple import TupleRepr +from rpython.rtyper.rtuple import TupleRepr from rpython.rtyper.lltypesystem.rstr import StringRepr, UnicodeRepr from rpython.rtyper.lltypesystem.test.test_rffi import BaseTestRffi from rpython.rtyper.module.support import LLSupport @@ -18,7 +18,7 @@ test_generator, test_rbool, test_rbuilder, test_rbuiltin, test_rclass, test_rconstantdict, test_rdict, test_remptydict, test_rfloat, test_rint, test_rlist, test_rpbc, test_rrange, test_rstr, - test_rtuple, test_runicode, test_rvirtualizable2, test_rweakref) + test_rtuple, test_runicode, test_rvirtualizable, test_rweakref) from rpython.rtyper.typesystem import getfunctionptr from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.backendopt.raisingop2direct_call import ( @@ -750,46 +750,46 @@ class TestRtypingLLVM(_LLVMMixin, test_annlowlevel.TestLLType): pass -class TestExceptionLLVM(_LLVMMixin, test_exception.TestLLtype): +class TestExceptionLLVM(_LLVMMixin, test_exception.TestException): def 
test_raise_and_catch_other(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') def test_raise_prebuilt_and_catch_other(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') -class TestGeneratorLLVM(_LLVMMixin, test_generator.TestLLtype): +class TestGeneratorLLVM(_LLVMMixin, test_generator.TestGenerator): pass -class TestRboolLLVM(_LLVMMixin, test_rbool.TestLLtype): +class TestRboolLLVM(_LLVMMixin, test_rbool.TestRbool): pass -class TestStringBuilderLLVM(_LLVMMixin, test_rbuilder.TestLLtype): +class TestStringBuilderLLVM(_LLVMMixin, test_rbuilder.TestStringBuilder): pass -class TestRbuiltinLLVM(_LLVMMixin, test_rbuiltin.TestLLtype): +class TestRbuiltinLLVM(_LLVMMixin, test_rbuiltin.TestRbuiltin): def test_debug_llinterpcall(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') -class TestRclassLLVM(_LLVMMixin, test_rclass.TestLLtype): +class TestRclassLLVM(_LLVMMixin, test_rclass.TestRclass): pass -class TestRconstantdictLLVM(_LLVMMixin, test_rconstantdict.TestLLtype): +class TestRconstantdictLLVM(_LLVMMixin, test_rconstantdict.TestRconstantdict): pass -class TestRdictLLVM(_LLVMMixin, test_rdict.TestLLtype): +class TestRdictLLVM(_LLVMMixin, test_rdict.TestRDict): def test_memoryerror_should_not_insert(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') -class TestRemptydictLLVM(_LLVMMixin, test_remptydict.TestLLtype): +class TestRemptydictLLVM(_LLVMMixin, test_remptydict.TestRemptydict): pass -class TestRfloatLLVM(_LLVMMixin, test_rfloat.TestLLtype): +class TestRfloatLLVM(_LLVMMixin, test_rfloat.TestRfloat): pass -class TestRintLLVM(_LLVMMixin, test_rint.TestLLtype): +class TestRintLLVM(_LLVMMixin, test_rint.TestRint): pass -class TestRlistLLVM(_LLVMMixin, test_rlist.TestLLtype): +class TestRlistLLVM(_LLVMMixin, test_rlist.TestRlist): def test_iterate_over_immutable_list(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') @@ -809,7 +809,7 @@ return False 
return True -class TestRPBCLLVM(_LLVMMixin, test_rpbc.TestLLtype): +class TestRPBCLLVM(_LLVMMixin, test_rpbc.TestRPBC): def read_attr(self, value, attr_name): class_name = 'pypy.rpython.test.test_rpbc.' + self.class_name(value) for (cd, _), ir in self._translator.rtyper.instance_reprs.items(): @@ -825,27 +825,27 @@ return attr raise AttributeError() -class TestRPBCExtraLLVM(_LLVMMixin, test_rpbc.TestExtraLLtype): +class TestRPBCExtraLLVM(_LLVMMixin, test_rpbc.TestRPBCExtra): pass -class TestRrangeLLVM(_LLVMMixin, test_rrange.TestLLtype): +class TestRrangeLLVM(_LLVMMixin, test_rrange.TestRrange): pass -class TestRstrLLVM(_LLVMMixin, test_rstr.TestLLtype): +class TestRstrLLVM(_LLVMMixin, test_rstr.TestRstr): def test_getitem_exc(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') -class TestRtupleLLVM(_LLVMMixin, test_rtuple.TestLLtype): +class TestRtupleLLVM(_LLVMMixin, test_rtuple.TestRtuple): pass -class TestRUnicodeLLVM(_LLVMMixin, test_runicode.TestLLtype): +class TestRUnicodeLLVM(_LLVMMixin, test_runicode.TestRUnicode): def test_getitem_exc(self): py.test.skip('Impossible to pass if not running on LLInterpreter.') -class TestRvirtualizableLLVM(_LLVMMixin, test_rvirtualizable2.TestLLtype): +class TestRvirtualizableLLVM(_LLVMMixin, test_rvirtualizable.TestVirtualizable): pass -class TestRweakrefLLVM(_LLVMMixin, test_rweakref.TestLLtype): +class TestRweakrefLLVM(_LLVMMixin, test_rweakref.TestRweakref): def _compile(self, *args, **kwds): kwds['gcpolicy'] = 'minimark' return _LLVMMixin._compile(self, *args, **kwds) From noreply at buildbot.pypy.org Wed Nov 20 02:05:00 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 02:05:00 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: The operation bare_raw_store() should emit the same code as raw_store(). 
Message-ID: <20131120010500.4085D1C237F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68252:083d15984f7a Date: 2013-11-20 02:04 +0100 http://bitbucket.org/pypy/pypy/changeset/083d15984f7a/ Log: The operation bare_raw_store() should emit the same code as raw_store(). diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1343,6 +1343,7 @@ def op_raw_store(self, result, addr, offset, value): addr = self._get_addr(value.type_, addr, offset) self.w('store {value.TV}, {addr.TV}'.format(**locals())) + op_bare_raw_store = op_raw_store def op_raw_memclear(self, result, ptr, size): self.op_direct_call(result, get_repr(llvm_memset), ptr, null_char, From noreply at buildbot.pypy.org Wed Nov 20 02:30:55 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 20 Nov 2013 02:30:55 +0100 (CET) Subject: [pypy-commit] pypy py3k: revert part of ed5309c80fdf and adapt its test to this branch: py3k doesn't Message-ID: <20131120013055.20B771C237F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68253:a95599228f5a Date: 2013-11-19 17:26 -0800 http://bitbucket.org/pypy/pypy/changeset/a95599228f5a/ Log: revert part of ed5309c80fdf and adapt its test to this branch: py3k doesn't need these workarounds diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -20,13 +20,8 @@ pass try: - encoding = sys.stderr.encoding - except: - encoding = None - - try: from traceback import print_exception - print_exception(exctype, value, traceback, _encoding=encoding) + print_exception(exctype, value, traceback) except: if not excepthook_failsafe(exctype, value): raise diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py 
@@ -283,8 +283,7 @@ def getvalue(self): return ''.join(self.output) - for input, expectedoutput in [(u"\u013a", "\xe5"), - (u"\u1111", "\\u1111")]: + for input in ("\u013a", "\u1111"): err = MyStringIO() err.encoding = 'iso-8859-2' sys.stderr = err @@ -292,12 +291,12 @@ eh = sys.__excepthook__ try: raise ValueError(input) - except ValueError, exc: + except ValueError as exc: eh(*sys.exc_info()) sys.stderr = savestderr - print repr(err.getvalue()) - assert err.getvalue().endswith("ValueError: %s\n" % expectedoutput) + print(ascii(err.getvalue())) + assert err.getvalue().endswith("ValueError: %s\n" % input) # FIXME: testing the code for a lost or replaced excepthook in # Python/pythonrun.c::PyErr_PrintEx() is tricky. From noreply at buildbot.pypy.org Wed Nov 20 02:45:02 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 20 Nov 2013 02:45:02 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20131120014502.286591C0225@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68254:422a28c0490d Date: 2013-11-19 17:44 -0800 http://bitbucket.org/pypy/pypy/changeset/422a28c0490d/ Log: 2to3 diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -174,7 +174,7 @@ if (!PyArg_ParseTuple(args, "s#", &buf, &y)) { return NULL; } - return PyInt_FromSsize_t(y); + return PyLong_FromSsize_t(y); ''') if sys.maxsize < 2**32: expected = 5 @@ -182,7 +182,7 @@ expected = -0xfffffffb else: expected = 0x5ffffffff - assert charbuf('12345') == expected + assert charbuf(b'12345') == expected def test_pyarg_parse_with_py_ssize_t(self): charbuf = self.import_parser( @@ -192,6 +192,6 @@ if (!PyArg_ParseTuple(args, "s#", &buf, &y)) { return NULL; } - return PyInt_FromSsize_t(y); + return PyLong_FromSsize_t(y); ''', PY_SSIZE_T_CLEAN=True) - assert charbuf('12345') == 5 + assert charbuf(b'12345') == 5 From noreply at 
buildbot.pypy.org Wed Nov 20 02:45:49 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 20 Nov 2013 02:45:49 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131120014549.74B5C1C0225@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68255:6331cf185f84 Date: 2013-11-19 17:44 -0800 http://bitbucket.org/pypy/pypy/changeset/6331cf185f84/ Log: merge default diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,6 +10,8 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality -.. branch windows-packaging +.. branch: windows-packaging Package tk/tcl runtime with win32 +.. 
branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -151,6 +151,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -448,6 +456,7 @@ fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -93,7 +93,11 @@ def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) - def descr_tostring(self, space): + def descr_tostring(self, space, w_order=None): + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) return space.wrap(loop.tostring(space, self)) def getitem_filter(self, space, arr): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -831,6 +831,17 @@ assert x.dtype == int8 assert (x == array(42)).all() + def test_descr(self): + import numpy as np + assert np.dtype('i2')[::2].tostring() == 
'\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' + a = array([[1, 2], [3, 4]], dtype='i1') + for order in (None, False, 'C', 'K', 'a'): + assert a.tostring(order) == '\x01\x02\x03\x04' + import sys + for order in (True, 'F'): + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.tostring, order) + else: + assert a.tostring(order) == '\x01\x03\x02\x04' class AppTestRepr(BaseNumpyAppTest): @@ -3019,7 +3032,8 @@ from numpypy import dtype, array, zeros d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() @@ -3029,7 +3043,8 @@ assert (a[1][v] == [5.5, 6.5, 7.5, 8.5, 9.5]).all() for v in [-3, 2]: exc = raises(IndexError, "a[0][%d]" % v) - assert exc.value.message == "invalid index (%d)" % (v + 2 if v < 0 else v) + assert exc.value.message == "invalid index (%d)" % \ + (v + 2 if v < 0 else v) exc = raises(IndexError, "a[0]['z']") assert exc.value.message == "invalid index" exc = raises(IndexError, "a[0][None]") @@ -3099,7 +3114,8 @@ from numpypy import dtype, array d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + 
next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): non_float_locs = [] non_float_regs = [] float_locs = [] float_regs = [] stack_args = [] + singlefloats = None arglocs = self.arglocs argtypes = self.argtypes count = 0 # stack alignment counter on_stack = 0 - for arg in arglocs: - if arg.type != FLOAT: + for i in range(len(arglocs)): + argtype = INT + if i < len(argtypes) and argtypes[i] == 'S': + argtype = argtypes[i] + arg = arglocs[i] + if arg.is_float(): + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + else: # float argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': + # Singlefloat argument + if singlefloats is None: + singlefloats = [] + tgt = self.get_next_vfp(argtype) + if tgt: + singlefloats.append((arg, tgt)) + else: # Singlefloat argument that needs to go on the stack + # treated the same as a regular core register argument + count += 1 + on_stack += 1 + stack_args.append(arg) + else: if len(non_float_regs) < len(r.argument_regs): reg = r.argument_regs[len(non_float_regs)] non_float_locs.append(arg) @@ -249,18 +310,6 @@ count += 1 on_stack += 1 stack_args.append(arg) - else: - if len(float_regs) < len(r.vfp_argument_regs): - reg = r.vfp_argument_regs[len(float_regs)] - 
float_locs.append(arg) - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 # align the stack if count % 2 != 0: stack_args.append(None) @@ -275,13 +324,28 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers + remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) + if singlefloats: + for src, dest in singlefloats: + if src.is_float(): + assert 0, 'unsupported case' + if src.is_stack(): + # use special VLDR for 32bit + self.asm.regalloc_mov(src, r.ip) + src = r.ip + if src.is_imm(): + self.mc.gen_load_int(r.ip.value, src.value) + src = r.ip + if src.is_core_reg(): + self.mc.VMOV_cs(dest.value, src.value) # remap values stored in core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) - # remap values stored in vfp registers - remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) def load_result(self): resloc = self.resloc + if self.restype == 'S': + self.mc.VMOV_sc(resloc.value, r.s0.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -178,6 +178,30 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_sc(self, dest, src): + """move a single precision vfp register[src] to a core reg[dest]""" + self._VMOV_32bit(src, dest, to_arm_register=1) + + def VMOV_cs(self, dest, src): + """move a core register[src] to a single precision vfp + register[dest]""" + self._VMOV_32bit(dest, src, to_arm_register=0) + + def _VMOV_32bit(self, float_reg, core_reg, to_arm_register, cond=cond.AL): + """This instruction transfers the contents of a 
single-precision VFP + register to an ARM core register, or the contents of an ARM core + register to a single-precision VFP register. + """ + instr = (cond << 28 + | 0xE << 24 + | to_arm_register << 20 + | ((float_reg >> 1) & 0xF) << 16 + | core_reg << 12 + | 0xA << 8 + | (float_reg & 0x1) << 7 + | 1 << 4) + self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): sz = 1 # for 64-bit mode instr = (cond << 28 @@ -198,8 +222,16 @@ self._VCVT(target, source, cond, 0, 1) def _VCVT(self, target, source, cond, opc2, sz): - D = 0 - M = 0 + # A8.6.295 + to_integer = (opc2 >> 2) & 1 + if to_integer: + D = target & 1 + target >>= 1 + M = (source >> 4) & 1 + else: + M = source & 1 + source >>= 1 + D = (target >> 4) & 1 op = 1 instr = (cond << 28 | 0xEB8 << 16 @@ -216,8 +248,8 @@ def _VCVT_single_double(self, target, source, cond, sz): # double_to_single = (sz == '1'); - D = 0 - M = 0 + D = target & 1 if sz else (target >> 4) & 1 + M = (source >> 4) & 1 if sz else source & 1 instr = (cond << 28 | 0xEB7 << 16 | 0xAC << 4 diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -55,12 +55,8 @@ type = FLOAT width = 2 * WORD - def get_single_precision_regs(self): - return [VFPRegisterLocation(i) for i in - [self.value * 2, self.value * 2 + 1]] - def __repr__(self): - return 'vfp%d' % self.value + return 'vfp(d%d)' % self.value def is_core_reg(self): return False @@ -74,6 +70,14 @@ def is_float(self): return True +class SVFPRegisterLocation(VFPRegisterLocation): + """Single Precission VFP Register""" + _immutable_ = True + width = WORD + type = 'S' + + def __repr__(self): + return 'vfp(s%d)' % self.value class ImmLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1102,17 
+1102,16 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_float_to_int(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_int_to_float(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond emit_op_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') @@ -1147,15 +1146,14 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_f64_f32(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_f32_f64(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -1,8 +1,10 @@ from rpython.jit.backend.arm.locations import VFPRegisterLocation +from rpython.jit.backend.arm.locations import SVFPRegisterLocation from rpython.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] +svfpregisters = [SVFPRegisterLocation(i) for i in 
range(32)] [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15] = registers @@ -10,6 +12,10 @@ [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters +# single precission VFP registers, 32-bit +for i in range(32): + globals()['s%d' % i] = svfpregisters[i] + # aliases for registers fp = r11 ip = r12 @@ -17,6 +23,7 @@ lr = r14 pc = r15 vfp_ip = d15 +svfp_ip = s31 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] @@ -27,6 +34,7 @@ callee_restored_registers = callee_resp + [pc] vfp_argument_regs = caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] +svfp_argument_regs = [globals()['s%i' % i] for i in range(16)] callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -22,7 +22,7 @@ supports_floats = True supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode - supports_singlefloats = not detect_hardfloat() + supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,7 +4,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -80,38 +79,6 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY - _t_opened = {} - - def t_dlopen(name): - # for direct execution: can't use the regular way on FreeBSD :-( - # 
http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html - import ctypes - if name: - name = rffi.charp2str(name) - else: - name = None - try: - res = ctypes.cdll.LoadLibrary(name) - except OSError, e: - raise DLOpenError(str(e)) - h = rffi.cast(rffi.VOIDP, res._handle) - _t_opened[rffi.cast(rffi.LONG, h)] = res - return h - - def t_dlclose(handle): - _t_opened.pop(rffi.cast(rffi.LONG, handle)) - return rffi.cast(rffi.INT, 0) - - def t_dldym(handle, name): - import ctypes - lib = _t_opened[rffi.cast(rffi.LONG, handle)] - try: - symbol = lib[name] - except AttributeError: - raise KeyError(name) - res = ctypes.cast(symbol, ctypes.c_void_p) - return rffi.cast(rffi.VOIDP, res.value or 0) - def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -124,8 +91,6 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ - if not we_are_translated(): - return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -139,16 +104,11 @@ raise DLOpenError(err) return res - def dlclose(handle): - if not we_are_translated(): - return t_dlclose(handle) - return c_dlclose(handle) + dlclose = c_dlclose def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ - if not we_are_translated(): - return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,4 +21,3 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) - dlclose(lib) From noreply at buildbot.pypy.org Wed Nov 20 11:59:49 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Nov 2013 11:59:49 +0100 (CET) Subject: [pypy-commit] pypy default: use env variable for disabling subprocess; Message-ID: 
<20131120105949.724121C3010@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68256:cf1ab17b3d9c Date: 2013-11-18 15:36 +0100 http://bitbucket.org/pypy/pypy/changeset/cf1ab17b3d9c/ Log: use env variable for disabling subprocess; diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. '.pyc' From noreply at buildbot.pypy.org Wed Nov 20 11:59:50 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Nov 2013 11:59:50 +0100 (CET) Subject: [pypy-commit] pypy default: (ronan, fijal, esoda) strip for rpython Message-ID: <20131120105950.A82FA1C3010@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68257:973ffa4ffa68 Date: 2013-11-20 11:59 +0100 http://bitbucket.org/pypy/pypy/changeset/973ffa4ffa68/ Log: (ronan, fijal, esoda) strip for rpython diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -460,13 +460,13 @@ check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr): + def method_strip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_lstrip(str, chr): + def method_lstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_rstrip(str, chr): + def method_rstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) def method_join(str, s_list): diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- 
a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) @@ -402,6 +403,46 @@ return result @jit.elidable + def ll_strip_default(s, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and s.chars[lpos].isspace(): + lpos += 1 + if right: + while lpos < rpos + 1 and s.chars[rpos].isspace(): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable + def ll_strip_multiple(s, s2, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and LLHelpers.ll_contains(s2, s.chars[lpos]): + lpos += 1 + if right: + while lpos < rpos + 1 and LLHelpers.ll_contains(s2, s.chars[rpos]): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable def ll_upper(s): s_chars = s.chars s_len = len(s_chars) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -231,11 +231,22 @@ def rtype_method_strip(self, hop, left=True, right=True): rstr = hop.args_r[0].repr v_str = hop.inputarg(rstr.repr, arg=0) - v_char = hop.inputarg(rstr.char_repr, arg=1) - v_left = hop.inputconst(Bool, left) - v_right = hop.inputconst(Bool, right) + args_v = [v_str] + if len(hop.args_s) == 2: + if isinstance(hop.args_s[1], 
annmodel.SomeString): + v_stripstr = hop.inputarg(rstr.repr, arg=1) + args_v.append(v_stripstr) + func = self.ll.ll_strip_multiple + else: + v_char = hop.inputarg(rstr.char_repr, arg=1) + args_v.append(v_char) + func = self.ll.ll_strip + else: + func = self.ll.ll_strip_default + args_v.append(hop.inputconst(Bool, left)) + args_v.append(hop.inputconst(Bool, right)) hop.exception_is_here() - return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) + return hop.gendirectcall(func, *args_v) def rtype_method_lstrip(self, hop): return self.rtype_method_strip(hop, left=True, right=False) diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -9,6 +9,7 @@ from rpython.rtyper.rstr import AbstractLLHelpers from rpython.rtyper.rtyper import TyperError from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import llstr, hlstr def test_parse_fmt(): @@ -457,6 +458,29 @@ res = self.interpret(left2, []) assert self.ll_to_string(res) == const('a') + def test_strip_multiple_chars(self): + const = self.const + def both(): + return const('!ab!').strip(const('!a')) + def left(): + return const('!+ab!').lstrip(const('!+')) + def right(): + return const('!ab!+').rstrip(const('!+')) + def empty(): + return const(' \t\t ').strip('\t ') + def left2(): + return const('a ').strip(' \t') + res = self.interpret(both, []) + assert self.ll_to_string(res) == const('b') + res = self.interpret(left, []) + assert self.ll_to_string(res) == const('ab!') + res = self.interpret(right, []) + assert self.ll_to_string(res) == const('!ab') + res = self.interpret(empty, []) + assert self.ll_to_string(res) == const('') + res = self.interpret(left2, []) + assert self.ll_to_string(res) == const('a') + def test_upper(self): const = self.const constchar = self.constchar @@ -1143,3 +1167,16 @@ self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) 
lltype.free(array, flavor='raw') + + def test_strip_no_arg(self): + strings = [" xyz ", "", "\t\vx"] + + def f(i): + return strings[i].strip() + + res = self.interpret(f, [0]) + assert hlstr(res) == "xyz" + res = self.interpret(f, [1]) + assert hlstr(res) == "" + res = self.interpret(f, [2]) + assert hlstr(res) == "x" From noreply at buildbot.pypy.org Wed Nov 20 11:59:52 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Nov 2013 11:59:52 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20131120105952.DD1021C3010@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68258:ccb17672557e Date: 2013-11-20 11:59 +0100 http://bitbucket.org/pypy/pypy/changeset/ccb17672557e/ Log: merge diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,10 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] elif sys.platform == 'win32': incdirs = [] linklibs = ['tcl85', 'tk85'] diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,6 +10,8 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality -.. branch windows-packaging +.. branch: windows-packaging Package tk/tcl runtime with win32 +.. 
branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1086,7 +1086,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1094,7 +1096,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1104,7 +1108,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ 
b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -211,7 +211,15 @@ "field named %s not found" % idx)) return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): + space.isinstance_w(w_idx, space.w_slice)): + return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) + elif isinstance(w_idx, W_NDimArray) and \ + isinstance(w_idx.implementation, scalar.Scalar): + w_idx = w_idx.get_scalar_value().item(space) + if not space.isinstance_w(w_idx, space.w_int) and \ + not space.isinstance_w(w_idx, space.w_bool): + raise OperationError(space.w_IndexError, space.wrap( + "arrays used as indices must be of integer (or boolean) type")) return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -151,6 +151,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -447,6 +455,7 @@ fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -93,7 +93,11 @@ def descr_fill(self, space, w_value): 
self.fill(self.get_dtype().coerce(space, w_value)) - def descr_tostring(self, space): + def descr_tostring(self, space, w_order=None): + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) return space.wrap(loop.tostring(space, self)) def getitem_filter(self, space, arr): @@ -198,7 +202,8 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: return self.implementation.descr_getitem(space, self, w_idx) @@ -212,7 +217,8 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -832,6 +832,17 @@ assert x.dtype == int8 assert (x == array(42)).all() + def test_descr(self): + import numpy as np + assert np.dtype('i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' + a = array([[1, 2], [3, 4]], dtype='i1') + for order in (None, False, 'C', 'K', 'a'): + assert a.tostring(order) == '\x01\x02\x03\x04' + import sys + for order in (True, 'F'): + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.tostring, order) + else: + assert a.tostring(order) == '\x01\x03\x02\x04' class AppTestRepr(BaseNumpyAppTest): @@ -3015,7 +3040,8 @@ from numpypy import dtype, 
array, zeros d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() @@ -3025,7 +3051,8 @@ assert (a[1][v] == [5.5, 6.5, 7.5, 8.5, 9.5]).all() for v in [-3, 2]: exc = raises(IndexError, "a[0][%d]" % v) - assert exc.value.message == "invalid index (%d)" % (v + 2 if v < 0 else v) + assert exc.value.message == "invalid index (%d)" % \ + (v + 2 if v < 0 else v) exc = raises(IndexError, "a[0]['z']") assert exc.value.message == "invalid index" exc = raises(IndexError, "a[0][None]") @@ -3095,7 +3122,8 @@ from numpypy import dtype, array d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -232,5 +232,11 @@ else: print_usage() + if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + 
self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): non_float_locs = [] non_float_regs = [] float_locs = [] float_regs = [] stack_args = [] + singlefloats = None arglocs = self.arglocs argtypes = self.argtypes count = 0 # stack alignment counter on_stack = 0 - for arg in arglocs: - if arg.type != FLOAT: + for i in range(len(arglocs)): + argtype = INT + if i < len(argtypes) and argtypes[i] == 'S': + argtype = argtypes[i] + arg = arglocs[i] + if arg.is_float(): + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + else: # float argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': + # Singlefloat argument + if singlefloats is None: + singlefloats = [] + tgt = self.get_next_vfp(argtype) + if tgt: + singlefloats.append((arg, tgt)) + else: # Singlefloat argument that needs to go on the stack + # treated the same as a regular core register argument + count += 1 + on_stack += 1 + stack_args.append(arg) + else: if len(non_float_regs) < len(r.argument_regs): reg = r.argument_regs[len(non_float_regs)] non_float_locs.append(arg) @@ -249,18 +310,6 @@ count += 1 on_stack += 1 stack_args.append(arg) - else: - if len(float_regs) < len(r.vfp_argument_regs): - reg = r.vfp_argument_regs[len(float_regs)] - float_locs.append(arg) - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 
0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 # align the stack if count % 2 != 0: stack_args.append(None) @@ -275,13 +324,28 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers + remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) + if singlefloats: + for src, dest in singlefloats: + if src.is_float(): + assert 0, 'unsupported case' + if src.is_stack(): + # use special VLDR for 32bit + self.asm.regalloc_mov(src, r.ip) + src = r.ip + if src.is_imm(): + self.mc.gen_load_int(r.ip.value, src.value) + src = r.ip + if src.is_core_reg(): + self.mc.VMOV_cs(dest.value, src.value) # remap values stored in core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) - # remap values stored in vfp registers - remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) def load_result(self): resloc = self.resloc + if self.restype == 'S': + self.mc.VMOV_sc(resloc.value, r.s0.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -178,6 +178,30 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_sc(self, dest, src): + """move a single precision vfp register[src] to a core reg[dest]""" + self._VMOV_32bit(src, dest, to_arm_register=1) + + def VMOV_cs(self, dest, src): + """move a core register[src] to a single precision vfp + register[dest]""" + self._VMOV_32bit(dest, src, to_arm_register=0) + + def _VMOV_32bit(self, float_reg, core_reg, to_arm_register, cond=cond.AL): + """This instruction transfers the contents of a single-precision VFP + register to an ARM core register, or the contents of an ARM core + register to a single-precision 
VFP register. + """ + instr = (cond << 28 + | 0xE << 24 + | to_arm_register << 20 + | ((float_reg >> 1) & 0xF) << 16 + | core_reg << 12 + | 0xA << 8 + | (float_reg & 0x1) << 7 + | 1 << 4) + self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): sz = 1 # for 64-bit mode instr = (cond << 28 @@ -198,8 +222,16 @@ self._VCVT(target, source, cond, 0, 1) def _VCVT(self, target, source, cond, opc2, sz): - D = 0 - M = 0 + # A8.6.295 + to_integer = (opc2 >> 2) & 1 + if to_integer: + D = target & 1 + target >>= 1 + M = (source >> 4) & 1 + else: + M = source & 1 + source >>= 1 + D = (target >> 4) & 1 op = 1 instr = (cond << 28 | 0xEB8 << 16 @@ -216,8 +248,8 @@ def _VCVT_single_double(self, target, source, cond, sz): # double_to_single = (sz == '1'); - D = 0 - M = 0 + D = target & 1 if sz else (target >> 4) & 1 + M = (source >> 4) & 1 if sz else source & 1 instr = (cond << 28 | 0xEB7 << 16 | 0xAC << 4 diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -55,12 +55,8 @@ type = FLOAT width = 2 * WORD - def get_single_precision_regs(self): - return [VFPRegisterLocation(i) for i in - [self.value * 2, self.value * 2 + 1]] - def __repr__(self): - return 'vfp%d' % self.value + return 'vfp(d%d)' % self.value def is_core_reg(self): return False @@ -74,6 +70,14 @@ def is_float(self): return True +class SVFPRegisterLocation(VFPRegisterLocation): + """Single Precission VFP Register""" + _immutable_ = True + width = WORD + type = 'S' + + def __repr__(self): + return 'vfp(s%d)' % self.value class ImmLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1102,17 +1102,16 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_float_to_int(r.vfp_ip.value, 
arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_float_to_int(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_int_to_float(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond emit_op_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') @@ -1147,15 +1146,14 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_f64_f32(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_f32_f64(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -1,8 +1,10 @@ from rpython.jit.backend.arm.locations import VFPRegisterLocation +from rpython.jit.backend.arm.locations import SVFPRegisterLocation from rpython.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] +svfpregisters = [SVFPRegisterLocation(i) for i in range(32)] [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15] = registers @@ -10,6 +12,10 @@ [d0, d1, d2, 
d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters +# single precission VFP registers, 32-bit +for i in range(32): + globals()['s%d' % i] = svfpregisters[i] + # aliases for registers fp = r11 ip = r12 @@ -17,6 +23,7 @@ lr = r14 pc = r15 vfp_ip = d15 +svfp_ip = s31 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] @@ -27,6 +34,7 @@ callee_restored_registers = callee_resp + [pc] vfp_argument_regs = caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] +svfp_argument_regs = [globals()['s%i' % i] for i in range(16)] callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -22,7 +22,7 @@ supports_floats = True supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode - supports_singlefloats = not detect_hardfloat() + supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,7 +4,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -80,38 +79,6 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY - _t_opened = {} - - def t_dlopen(name): - # for direct execution: can't use the regular way on FreeBSD :-( - # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html - import ctypes - if name: - name = rffi.charp2str(name) - else: - name = None - try: - res = 
ctypes.cdll.LoadLibrary(name) - except OSError, e: - raise DLOpenError(str(e)) - h = rffi.cast(rffi.VOIDP, res._handle) - _t_opened[rffi.cast(rffi.LONG, h)] = res - return h - - def t_dlclose(handle): - _t_opened.pop(rffi.cast(rffi.LONG, handle)) - return rffi.cast(rffi.INT, 0) - - def t_dldym(handle, name): - import ctypes - lib = _t_opened[rffi.cast(rffi.LONG, handle)] - try: - symbol = lib[name] - except AttributeError: - raise KeyError(name) - res = ctypes.cast(symbol, ctypes.c_void_p) - return rffi.cast(rffi.VOIDP, res.value or 0) - def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -124,8 +91,6 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ - if not we_are_translated(): - return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -139,16 +104,11 @@ raise DLOpenError(err) return res - def dlclose(handle): - if not we_are_translated(): - return t_dlclose(handle) - return c_dlclose(handle) + dlclose = c_dlclose def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ - if not we_are_translated(): - return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,4 +21,3 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) - dlclose(lib) From noreply at buildbot.pypy.org Wed Nov 20 16:19:48 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 16:19:48 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: hg merge default Message-ID: <20131120151948.EB6781C0144@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r68259:8c9b03d9284a Date: 2013-11-20 16:11 +0100 http://bitbucket.org/pypy/pypy/changeset/8c9b03d9284a/ Log: 
hg merge default diff too long, truncating to 2000 out of 43782 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. 
We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -1780,7 +1780,19 @@ # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" - if argument_values is not action.default: + + # XXX PyPy bug-to-bug compatibility: "is" on primitive types + # is not consistent in CPython. We'll assume it is close + # enough for ints (which is true only for "small ints"), but + # for floats and longs and complexes we'll go for the option + # of forcing "is" to say False, like it usually does on + # CPython. A fix is pending on CPython trunk + # (http://bugs.python.org/issue18943) but that might change + # the details of the semantics and so not be applied to 2.7. + # See the line AA below. 
+ + if (argument_values is not action.default or + type(argument_values) in (float, long, complex)): # AA seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F 
+0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- 
diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK 
+DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No +WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv 
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD -VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju -SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END 
CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl -bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 
-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t +b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg +IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB 
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by 
+AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X +OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG 
-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy 
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + 
self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), 
None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ 
-111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -128,10 +128,10 @@ """ if hex is not None: - if (bytes is not None or bytes_le is not None or fields is not None - or int is not None): - raise TypeError('if the hex argument is given, bytes, bytes_le, fields,' - ' and int need to be None') + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') 
hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: @@ -139,8 +139,8 @@ int = long(hex, 16) elif bytes_le is not None: if bytes is not None or fields is not None or int is not None: - raise TypeError('if the bytes_le argument is given, bytes, fields,' - ' and int need to be None') + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + @@ -150,15 +150,16 @@ struct.unpack('>Q', bytes[8:])[0]) elif bytes is not None: if fields is not None or int is not None: - raise TypeError('if the bytes argument is given, fields' - ' and int need to be None') + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') int = (struct.unpack('>Q', bytes[:8])[0] << 64 | struct.unpack('>Q', bytes[8:])[0]) elif fields is not None: if int is not None: - raise TypeError('if the fields argument is given, int needs to be None') + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -363,9 +371,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -22,6 +22,7 @@ READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE EXCEPTION = 
tklib.TCL_EXCEPTION +DONT_WAIT = tklib.TCL_DONT_WAIT def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -4,7 +4,23 @@ from . import TclError from .tclobj import TclObject, FromObj, AsObj, TypeCache +import contextlib import sys +import threading +import time + + +class _DummyLock(object): + "A lock-like object that does not do anything" + def acquire(self): + pass + def release(self): + pass + def __enter__(self): + pass + def __exit__(self, *exc): + pass + def varname_converter(input): if isinstance(input, TclObject): @@ -37,17 +53,18 @@ def PythonCmd(clientData, interp, argc, argv): self = tkffi.from_handle(clientData) assert self.app.interp == interp - try: - args = [tkffi.string(arg) for arg in argv[1:argc]] - result = self.func(*args) - obj = AsObj(result) - tklib.Tcl_SetObjResult(interp, obj) - except: - self.app.errorInCmd = True - self.app.exc_info = sys.exc_info() - return tklib.TCL_ERROR - else: - return tklib.TCL_OK + with self.app._tcl_lock_released(): + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK @tkffi.callback("Tcl_CmdDeleteProc") def PythonCmdDelete(clientData): @@ -58,6 +75,8 @@ class TkApp(object): + _busywaitinterval = 0.02 # 20ms. + def __new__(cls, screenName, baseName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: @@ -73,6 +92,12 @@ self.quitMainLoop = False self.errorInCmd = False + if not self.threaded: + # TCL is not thread-safe, calls needs to be serialized. 
+ self._tcl_lock = threading.Lock() + else: + self._tcl_lock = _DummyLock() + self._typeCache = TypeCache() self._commands = {} @@ -133,6 +158,13 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise RuntimeError("Calling Tcl from different appartment") + @contextlib.contextmanager + def _tcl_lock_released(self): + "Context manager to temporarily release the tcl lock." + self._tcl_lock.release() + yield + self._tcl_lock.acquire() + def loadtk(self): # We want to guard against calling Tk_Init() multiple times err = tklib.Tcl_Eval(self.interp, "info exists tk_version") @@ -159,22 +191,25 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) - if not res: - self.raiseTclError() - assert self._wantobjects - return FromObj(self, res) + with self._tcl_lock: + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) def _setvar(self, name1, value, global_only=False): name1 = varname_converter(name1) + # XXX Acquire tcl lock??? 
newval = AsObj(value) flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, - newval, flags) - if not res: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() def _unsetvar(self, name1, name2=None, global_only=False): name1 = varname_converter(name1) @@ -183,9 +218,10 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() def getvar(self, name1, name2=None): return self._var_invoke(self._getvar, name1, name2) @@ -219,9 +255,10 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, - clientData, _CommandData.PythonCmdDelete) + with self._tcl_lock: + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +266,8 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + with self._tcl_lock: + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) if res == -1: raise TclError("can't delete Tcl command") @@ -256,11 +294,12 @@ tklib.Tcl_IncrRefCount(obj) objects[i] = obj - res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() - else: - result = self._callResult() + with self._tcl_lock: + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == 
tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() finally: for obj in objects: if obj: @@ -280,17 +319,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): if isinstance(arg, tuple): @@ -375,7 +416,10 @@ if self.threaded: result = tklib.Tcl_DoOneEvent(0) else: - raise NotImplementedError("TCL configured without threads") + with self._tcl_lock: + result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT) + if result == 0: + time.sleep(self._busywaitinterval) if result < 0: break diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return 
result + pass # fall through and return tcl object. elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -1,6 +1,7 @@ # C bindings with libtcl and libtk. from cffi import FFI +import sys tkffi = FFI() @@ -18,6 +19,8 @@ #define TCL_EVAL_DIRECT ... #define TCL_EVAL_GLOBAL ... +#define TCL_DONT_WAIT ... + typedef unsigned short Tcl_UniChar; typedef ... Tcl_Interp; typedef ...* Tcl_ThreadId; @@ -69,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -102,6 +106,25 @@ int Tk_GetNumMainWindows(); """) +# XXX find a better way to detect paths +# XXX pick up CPPFLAGS and LDFLAGS and add to these paths? 
+if sys.platform.startswith("openbsd"): + incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] + linklibs = ['tk85', 'tcl85'] + libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] +else: + incdirs=['/usr/include/tcl'] + linklibs=['tcl', 'tk'] + libdirs = [] + tklib = tkffi.verify(""" #include #include @@ -109,6 +132,7 @@ char *get_tk_version() { return TK_VERSION; } char *get_tcl_version() { return TCL_VERSION; } """, -include_dirs=['/usr/include/tcl'], -libraries=['tcl', 'tk'], +include_dirs=incdirs, +libraries=linklibs, +library_dirs = libdirs ) diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7" -__version_info__ = (0, 7) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -54,12 +55,14 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . 
import __version__ - assert backend.__version__ == __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -73,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -94,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -108,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! 
+ key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -143,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -279,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -319,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -341,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -371,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -403,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -443,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ b/lib_pypy/cffi/commontypes.py @@ -30,7 +30,9 @@ elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) else: - assert commontype != result + if commontype == result: + raise api.FFIError("Unsupported type: %r. Please file a bug " + "if you think it should be." 
% (commontype,)) result = resolve_common_type(result) # recursively assert isinstance(result, model.BaseTypeByIdentity) _CACHE[commontype] = result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, 
model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -290,13 +278,26 @@ # assume a primitive type. get it from .names, but reduce # synonyms to a single chosen combination names = list(type.names) - if names == ['signed'] or names == ['unsigned']: - names.append('int') - if names[0] == 'signed' and names != ['signed', 'char']: - names.pop(0) - if (len(names) > 1 and names[-1] == 'int' - and names != ['unsigned', 'int']): - names.pop() + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names ident = ' '.join(names) if ident == 'void': return model.void_type @@ -318,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -352,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) 
== ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -478,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -498,10 +499,10 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# - raise api.FFIError("unsupported non-constant or " - "not immediately constant expression") + raise api.FFIError("unsupported expression: expected a " + "simple numeric constant") def _build_enum_type(self, explicit_name, decls): if decls is not None: diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. 
The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if 
isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) From noreply at buildbot.pypy.org Wed Nov 20 16:19:52 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 16:19:52 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: hg merge default Message-ID: <20131120151952.5BE0D1C0144@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68260:087b6a2d8558 Date: 2013-11-20 16:11 +0100 http://bitbucket.org/pypy/pypy/changeset/087b6a2d8558/ Log: hg merge default diff too long, truncating to 2000 out of 43780 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = 
python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -1780,7 +1780,19 @@ # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" - if argument_values is not action.default: + + # XXX PyPy bug-to-bug compatibility: "is" on primitive types + # is not consistent in CPython. We'll assume it is close + # enough for ints (which is true only for "small ints"), but + # for floats and longs and complexes we'll go for the option + # of forcing "is" to say False, like it usually does on + # CPython. A fix is pending on CPython trunk + # (http://bugs.python.org/issue18943) but that might change + # the details of the semantics and so not be applied to 2.7. + # See the line AA below. 
+ + if (argument_values is not action.default or + type(argument_values) in (float, long, complex)): # AA seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F 
+0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- 
diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK 
+DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No +WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv 
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD -VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju -SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END 
CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl -bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 
-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t +b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg +IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB 
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by 
+AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X +OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG 
-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy 
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + 
self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), 
None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ 
-111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -128,10 +128,10 @@ """ if hex is not None: - if (bytes is not None or bytes_le is not None or fields is not None - or int is not None): - raise TypeError('if the hex argument is given, bytes, bytes_le, fields,' - ' and int need to be None') + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') 
hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: @@ -139,8 +139,8 @@ int = long(hex, 16) elif bytes_le is not None: if bytes is not None or fields is not None or int is not None: - raise TypeError('if the bytes_le argument is given, bytes, fields,' - ' and int need to be None') + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + @@ -150,15 +150,16 @@ struct.unpack('>Q', bytes[8:])[0]) elif bytes is not None: if fields is not None or int is not None: - raise TypeError('if the bytes argument is given, fields' - ' and int need to be None') + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') int = (struct.unpack('>Q', bytes[:8])[0] << 64 | struct.unpack('>Q', bytes[8:])[0]) elif fields is not None: if int is not None: - raise TypeError('if the fields argument is given, int needs to be None') + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -363,9 +371,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -22,6 +22,7 @@ READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE EXCEPTION = 
tklib.TCL_EXCEPTION +DONT_WAIT = tklib.TCL_DONT_WAIT def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -4,7 +4,23 @@ from . import TclError from .tclobj import TclObject, FromObj, AsObj, TypeCache +import contextlib import sys +import threading +import time + + +class _DummyLock(object): + "A lock-like object that does not do anything" + def acquire(self): + pass + def release(self): + pass + def __enter__(self): + pass + def __exit__(self, *exc): + pass + def varname_converter(input): if isinstance(input, TclObject): @@ -37,17 +53,18 @@ def PythonCmd(clientData, interp, argc, argv): self = tkffi.from_handle(clientData) assert self.app.interp == interp - try: - args = [tkffi.string(arg) for arg in argv[1:argc]] - result = self.func(*args) - obj = AsObj(result) - tklib.Tcl_SetObjResult(interp, obj) - except: - self.app.errorInCmd = True - self.app.exc_info = sys.exc_info() - return tklib.TCL_ERROR - else: - return tklib.TCL_OK + with self.app._tcl_lock_released(): + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK @tkffi.callback("Tcl_CmdDeleteProc") def PythonCmdDelete(clientData): @@ -58,6 +75,8 @@ class TkApp(object): + _busywaitinterval = 0.02 # 20ms. + def __new__(cls, screenName, baseName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: @@ -73,6 +92,12 @@ self.quitMainLoop = False self.errorInCmd = False + if not self.threaded: + # TCL is not thread-safe, calls needs to be serialized. 
+ self._tcl_lock = threading.Lock() + else: + self._tcl_lock = _DummyLock() + self._typeCache = TypeCache() self._commands = {} @@ -133,6 +158,13 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise RuntimeError("Calling Tcl from different appartment") + @contextlib.contextmanager + def _tcl_lock_released(self): + "Context manager to temporarily release the tcl lock." + self._tcl_lock.release() + yield + self._tcl_lock.acquire() + def loadtk(self): # We want to guard against calling Tk_Init() multiple times err = tklib.Tcl_Eval(self.interp, "info exists tk_version") @@ -159,22 +191,25 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) - if not res: - self.raiseTclError() - assert self._wantobjects - return FromObj(self, res) + with self._tcl_lock: + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) def _setvar(self, name1, value, global_only=False): name1 = varname_converter(name1) + # XXX Acquire tcl lock??? 
newval = AsObj(value) flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, - newval, flags) - if not res: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() def _unsetvar(self, name1, name2=None, global_only=False): name1 = varname_converter(name1) @@ -183,9 +218,10 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() def getvar(self, name1, name2=None): return self._var_invoke(self._getvar, name1, name2) @@ -219,9 +255,10 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, - clientData, _CommandData.PythonCmdDelete) + with self._tcl_lock: + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +266,8 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + with self._tcl_lock: + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) if res == -1: raise TclError("can't delete Tcl command") @@ -256,11 +294,12 @@ tklib.Tcl_IncrRefCount(obj) objects[i] = obj - res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() - else: - result = self._callResult() + with self._tcl_lock: + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == 
tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() finally: for obj in objects: if obj: @@ -280,17 +319,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): if isinstance(arg, tuple): @@ -375,7 +416,10 @@ if self.threaded: result = tklib.Tcl_DoOneEvent(0) else: - raise NotImplementedError("TCL configured without threads") + with self._tcl_lock: + result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT) + if result == 0: + time.sleep(self._busywaitinterval) if result < 0: break diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return 
result + pass # fall through and return tcl object. elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -1,6 +1,7 @@ # C bindings with libtcl and libtk. from cffi import FFI +import sys tkffi = FFI() @@ -18,6 +19,8 @@ #define TCL_EVAL_DIRECT ... #define TCL_EVAL_GLOBAL ... +#define TCL_DONT_WAIT ... + typedef unsigned short Tcl_UniChar; typedef ... Tcl_Interp; typedef ...* Tcl_ThreadId; @@ -69,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -102,6 +106,25 @@ int Tk_GetNumMainWindows(); """) +# XXX find a better way to detect paths +# XXX pick up CPPFLAGS and LDFLAGS and add to these paths? 
+if sys.platform.startswith("openbsd"): + incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] + linklibs = ['tk85', 'tcl85'] + libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] +else: + incdirs=['/usr/include/tcl'] + linklibs=['tcl', 'tk'] + libdirs = [] + tklib = tkffi.verify(""" #include #include @@ -109,6 +132,7 @@ char *get_tk_version() { return TK_VERSION; } char *get_tcl_version() { return TCL_VERSION; } """, -include_dirs=['/usr/include/tcl'], -libraries=['tcl', 'tk'], +include_dirs=incdirs, +libraries=linklibs, +library_dirs = libdirs ) diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7" -__version_info__ = (0, 7) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -54,12 +55,14 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . 
import __version__ - assert backend.__version__ == __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -73,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -94,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -108,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! 
+ key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -143,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -279,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -319,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -341,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -371,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -403,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -443,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ b/lib_pypy/cffi/commontypes.py @@ -30,7 +30,9 @@ elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) else: - assert commontype != result + if commontype == result: + raise api.FFIError("Unsupported type: %r. Please file a bug " + "if you think it should be." 
% (commontype,)) result = resolve_common_type(result) # recursively assert isinstance(result, model.BaseTypeByIdentity) _CACHE[commontype] = result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, 
model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -290,13 +278,26 @@ # assume a primitive type. get it from .names, but reduce # synonyms to a single chosen combination names = list(type.names) - if names == ['signed'] or names == ['unsigned']: - names.append('int') - if names[0] == 'signed' and names != ['signed', 'char']: - names.pop(0) - if (len(names) > 1 and names[-1] == 'int' - and names != ['unsigned', 'int']): - names.pop() + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names ident = ' '.join(names) if ident == 'void': return model.void_type @@ -318,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -352,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) 
== ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -478,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -498,10 +499,10 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# - raise api.FFIError("unsupported non-constant or " - "not immediately constant expression") + raise api.FFIError("unsupported expression: expected a " + "simple numeric constant") def _build_enum_type(self, explicit_name, decls): if decls is not None: diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. 
The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if 
isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) From noreply at buildbot.pypy.org Wed Nov 20 19:41:10 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 19:41:10 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: Fall back to calling app-level __buffer__ when it's not possible to call the interp-level buffer API. Message-ID: <20131120184110.57B771C0144@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r68261:456bc410dd3a Date: 2013-11-20 19:40 +0100 http://bitbucket.org/pypy/pypy/changeset/456bc410dd3a/ Log: Fall back to calling app-level __buffer__ when it's not possible to call the interp-level buffer API. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -197,6 +197,12 @@ return None def buffer_w(self, space): + from pypy.module.__builtin__.interp_memoryview import W_Buffer + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if isinstance(w_result, W_Buffer): + return w_result.buf self._typed_unwrap_error(space, "buffer") def str_w(self, space): From noreply at buildbot.pypy.org Wed Nov 20 22:32:44 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 20 Nov 2013 22:32:44 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: This seems to fix the test_ztranslation errors. Message-ID: <20131120213244.D542D1C02C5@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r68262:6a1045ec1635 Date: 2013-11-20 22:31 +0100 http://bitbucket.org/pypy/pypy/changeset/6a1045ec1635/ Log: This seems to fix the test_ztranslation errors. 
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -290,6 +290,11 @@ ec._py_repr = None return ec + def buffer_w(self, w_obj): + from pypy.interpreter.buffer import Buffer + is_root(w_obj) + return Buffer() + # ---------- def translates(self, func=None, argtypes=None, seeobj_w=[], **kwds): From noreply at buildbot.pypy.org Wed Nov 20 23:44:23 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 20 Nov 2013 23:44:23 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: minor cleanup Message-ID: <20131120224423.CBF501C0225@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: refactor-buffer-api Changeset: r68263:e60a94e352e0 Date: 2013-11-20 14:43 -0800 http://bitbucket.org/pypy/pypy/changeset/e60a94e352e0/ Log: minor cleanup diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -197,10 +197,10 @@ return None def buffer_w(self, space): - from pypy.module.__builtin__.interp_memoryview import W_Buffer w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) + from pypy.module.__builtin__.interp_memoryview import W_Buffer if isinstance(w_result, W_Buffer): return w_result.buf self._typed_unwrap_error(space, "buffer") diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -43,7 +43,7 @@ class MiniBuffer(W_Buffer): def __init__(self, buffer, keepalive=None): - W_Buffer. 
__init__(self, buffer) + W_Buffer.__init__(self, buffer) self.keepalive = keepalive MiniBuffer.typedef = TypeDef( diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -5,8 +5,8 @@ from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.array.interp_array import ArrayBuffer -from pypy.module.__builtin__.interp_memoryview import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() From noreply at buildbot.pypy.org Thu Nov 21 00:15:35 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 21 Nov 2013 00:15:35 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: just rename the descr_news so they don't clash Message-ID: <20131120231535.CB3511C0144@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: refactor-buffer-api Changeset: r68264:943710084471 Date: 2013-11-20 15:14 -0800 http://bitbucket.org/pypy/pypy/changeset/943710084471/ Log: just rename the descr_news so they don't clash diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -44,7 +44,7 @@ @staticmethod @unwrap_spec(offset=int, size=int) - def descr_new(space, w_subtype, w_object, offset=0, size=-1): + def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): if space.isinstance_w(w_object, space.w_unicode): # unicode objects support the old buffer interface # but not the new buffer interface (change in python 2.7) @@ -72,8 +72,6 @@ else: buf = buffer.SubBuffer(buf, offset, size) return W_Buffer(buf) - # hack to fix translation - descr_new.__get__('foo').__name__ = 'descr_new_buffer' def 
descr_len(self, space): return space.wrap(self.buf.getlength()) @@ -147,7 +145,7 @@ start of the object (or at the specified offset). The slice will extend to the end of the target object (or with the specified size). """, - __new__ = interp2app(W_Buffer.descr_new), + __new__ = interp2app(W_Buffer.descr_new_buffer), __len__ = interp2app(W_Buffer.descr_len), __getitem__ = interp2app(W_Buffer.descr_getitem), __setitem__ = interp2app(W_Buffer.descr_setitem), @@ -179,7 +177,7 @@ return self.buf @staticmethod - def descr_new(space, w_subtype, w_object): + def descr_new_memoryview(space, w_subtype, w_object): w_memoryview = W_MemoryView(space.buffer_w(w_object)) return w_memoryview @@ -296,7 +294,7 @@ __doc__ = """\ Create a new memoryview object which references the given object. """, - __new__ = interp2app(W_MemoryView.descr_new), + __new__ = interp2app(W_MemoryView.descr_new_memoryview), __eq__ = interp2app(W_MemoryView.descr_eq), __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), From noreply at buildbot.pypy.org Thu Nov 21 10:57:41 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 21 Nov 2013 10:57:41 +0100 (CET) Subject: [pypy-commit] stmgc default: more asserts and fix the case when start_exclusivelock() returns in a Message-ID: <20131121095741.E04911C042B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r550:c0bc566a9af3 Date: 2013-11-21 10:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/c0bc566a9af3/ Log: more asserts and fix the case when start_exclusivelock() returns in a transaction that needs to abort (same as after stm_start_sharedlock()) diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1745,6 +1745,8 @@ /* there may be a thread holding the collection lock because it steals a stub belonging to the thread that previously owned this descriptor. 
+ (not currently, as we do a start_exclusivelock() + before calling DescriptorInit) */ } else { @@ -1809,6 +1811,10 @@ gcptrlist_delete(&d->public_descriptor->stolen_objects); gcptrlist_delete(&d->public_descriptor->stolen_young_stubs); + assert(d->young_weakrefs.size == 0); + assert(d->public_with_young_copy.size == 0); + assert(d->old_objects_to_trace.size == 0); + stmgcpage_done_tls(); i = d->public_descriptor_index; assert(stm_descriptor_array[i] == d->public_descriptor); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -278,6 +278,9 @@ static void trace_stub(struct tx_descriptor *d, gcptr S) { + /* ignore stub if it is outdated, because then the transaction + will abort (or has been aborted long ago) */ + revision_t w = ACCESS_ONCE(S->h_revision); if ((w & 3) != 2) { /* P has a ptr in h_revision, but this object is not a stub diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -256,8 +256,11 @@ memset(&sd.all_stubs, 0, sizeof(sd.all_stubs)); steal_data = &sd; stmgc_trace(L, &replace_ptr_to_protected_with_stub); - if (L->h_tid & GCFLAG_WEAKREF) + if (L->h_tid & GCFLAG_WEAKREF) { + /* We have to trace the weakref manually because stmgc_trace + doesn't */ replace_ptr_to_protected_with_stub(WEAKREF_PTR(L, stmgc_size(L))); + } g2l_delete_not_used_any_more(&sd.all_stubs); /* If another thread (the foreign or a 3rd party) does a read diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -88,6 +88,7 @@ XXX: remove again when sure it is not needed (interaction with stop_all_other_threads()) */ start_exclusivelock(); + assert(stm_active == 0); stmgcpage_acquire_global_lock(); #ifdef STM_BARRIER_COUNT static int seen = 0; @@ -116,6 +117,7 @@ if (token == 1) { start_exclusivelock(); + assert(stm_active == 0); stmgcpage_acquire_global_lock(); done_shadowstack(); stmgc_done_nursery(); @@ -384,6 +386,14 @@ ACCESS_ONCE(sync_required) = -1; stm_stop_sharedlock(); start_exclusivelock(); + if 
(stm_active < 0) { + /* we have to give up and abort. Another thread did + a major collect and makes us abort now */ + stop_exclusivelock(); + stm_start_sharedlock(); + assert(stm_active < 0); + AbortNowIfDelayed(); + } ACCESS_ONCE(sync_required) = 0; assert(in_single_thread == NULL); @@ -400,6 +410,10 @@ stop_exclusivelock(); stm_start_sharedlock(); + + /* another thread may commit, start a major collect, and + make us abort */ + AbortNowIfDelayed(); } void stm_possible_safe_point(void) diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -39,10 +39,13 @@ if (stmgc_is_in_nursery(d, pointing_to)) { if (pointing_to->h_tid & GCFLAG_MOVED) { + gcptr to = (gcptr)pointing_to->h_revision; dprintf(("weakref ptr moved %p->%p\n", - *WEAKREF_PTR(weakref, size), - (gcptr)pointing_to->h_revision)); - *WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision; + *WEAKREF_PTR(weakref, size), to)); + *WEAKREF_PTR(weakref, size) = to; + assert(to->h_tid & GCFLAG_OLD); + assert(!(to->h_tid & GCFLAG_MOVED)); + assert(!(pointing_to->h_tid & GCFLAG_OLD)); } else { assert(!IS_POINTER(pointing_to->h_revision)); From noreply at buildbot.pypy.org Thu Nov 21 13:23:05 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 21 Nov 2013 13:23:05 +0100 (CET) Subject: [pypy-commit] stmgc default: fix possible races for minor collections that look at possibly locked, global Message-ID: <20131121122305.C59961C0F38@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r551:6e5022b6107e Date: 2013-11-21 13:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/6e5022b6107e/ Log: fix possible races for minor collections that look at possibly locked, global objects diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -140,7 +140,8 @@ #define SPLP_LOCKED_INFLIGHT 1 #define SPLP_LOCKED_VALIDATE 2 #define SPLP_LOCKED_COMMIT 3 -#define SPINLOOP_REASONS 4 +#define SPLP_LOCKED_COLLECT 4 +#define SPINLOOP_REASONS 5 /* this struct contains thread-local data 
that may be occasionally * accessed by a foreign thread and that must stay around after the diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -55,6 +55,9 @@ || *d->nursery_current_ref == d->nursery_end); stm_free(d->nursery_base); + assert(d->public_with_young_copy.size == 0); + assert(d->old_objects_to_trace.size == 0); + assert(d->young_weakrefs.size == 0); gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_with_young_copy); gcptrlist_delete(&d->young_weakrefs); @@ -281,7 +284,16 @@ /* ignore stub if it is outdated, because then the transaction will abort (or has been aborted long ago) */ - revision_t w = ACCESS_ONCE(S->h_revision); + revision_t w; + + retry: + w = ACCESS_ONCE(S->h_revision); + if (!IS_POINTER(w) && w >= LOCKED) { + /* check again when unlocked */ + SpinLoop(SPLP_LOCKED_COLLECT); + goto retry; + } + if ((w & 3) != 2) { /* P has a ptr in h_revision, but this object is not a stub with a protected pointer. It has likely been the case @@ -335,6 +347,7 @@ */ long i, size = d->public_with_young_copy.size; gcptr *items = d->public_with_young_copy.items; + revision_t v; for (i = 0; i < size; i++) { gcptr P = items[i]; @@ -342,7 +355,7 @@ assert(P->h_tid & GCFLAG_OLD); assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - revision_t v = ACCESS_ONCE(P->h_revision); + v = ACCESS_ONCE(P->h_revision); wlog_t *item; G2L_FIND(d->public_to_private, P, item, goto not_in_public_to_private); @@ -363,10 +376,19 @@ item->addr, item->val)); assert(_stm_is_private(item->val)); visit_if_young(&item->val); + assert(item->val->h_tid & GCFLAG_OLD); continue; not_in_public_to_private: + /* re-read because of possible spinloop */ + v = ACCESS_ONCE(P->h_revision); + if (!IS_POINTER(v)) { + if (v >= LOCKED) { + /* check again when unlocked */ + SpinLoop(SPLP_LOCKED_COLLECT); + goto not_in_public_to_private; + } /* P is neither a key in public_to_private nor outdated. It must come from an older transaction that aborted. 
Nothing to do now. From noreply at buildbot.pypy.org Thu Nov 21 15:23:44 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 21 Nov 2013 15:23:44 +0100 (CET) Subject: [pypy-commit] stmgc default: finally reproduce crash as a testcase Message-ID: <20131121142344.227571C31E9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r552:754b5dc700b8 Date: 2013-11-21 15:23 +0100 http://bitbucket.org/pypy/stmgc/changeset/754b5dc700b8/ Log: finally reproduce crash as a testcase diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py --- a/c4/test/test_weakref.py +++ b/c4/test/test_weakref.py @@ -54,6 +54,61 @@ p1 = lib.stm_pop_root() assert lib.rawgetptr(p1, 0) == p2 + + def test_old_private_not_keep_alive_weakref(self): + p = palloc(HDR + WORD) + q = palloc_refs(1) + + def f1(c): + if c == 1: + # currently fails because: + # p1 still in old_objects_to_trace + # -> keeps alive weakp1w + # -> stm_move_young_weakrefs() sees a weakref pointing + # to an aborted object + minor_collect() + return + + # allocate the "container" as old, private q1 + q1 = lib.stm_write_barrier(q) + assert classify(q1) == "private" + lib.stm_push_root(q1) + minor_collect() + q1 = lib.stm_pop_root() + assert classify(q1) == "private" + assert q1.h_tid & GCFLAG_OLD + assert q1.h_tid & GCFLAG_WRITE_BARRIER + + # allocate young private p1 to point to + p1 = lib.stm_write_barrier(p) + assert ffi.cast("gcptr", p1.h_original) == p + assert classify(p1) == "private" + assert not (p1.h_tid & GCFLAG_OLD) + + lib.stm_push_root(p1) + lib.stm_push_root(q1) + weakp1w = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p1) + q1 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + # q1 still old, p1 still young, weakp1w also young + + q1w = lib.stm_write_barrier(q1) + # add q1 to old_objects_to_trace + assert q1 == q1w # was and is private + lib.rawsetptr(q1, 0, weakp1w) + + abort_and_retry() + + perform_transaction(f1) + + + + + + + + + class TestMajorCollection(BaseTest): From noreply at 
buildbot.pypy.org Thu Nov 21 16:05:08 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 21 Nov 2013 16:05:08 +0100 (CET) Subject: [pypy-commit] stmgc default: fix failing case from last commit by purging objects from aborted transactions Message-ID: <20131121150508.A7BC31C31E9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r553:281794862f21 Date: 2013-11-21 16:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/281794862f21/ Log: fix failing case from last commit by purging objects from aborted transactions from old_objects_to_trace diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -738,9 +738,11 @@ W = LocalizePublic(d, R); assert(is_private(W)); - if (W->h_tid & GCFLAG_OLD) + if (W->h_tid & GCFLAG_OLD) { + /* XXX: probably unnecessary as it is done in allocate_next_section + already */ gcptrlist_insert(&d->old_objects_to_trace, W); - else + } else gcptrlist_insert(&d->public_with_young_copy, R); } else @@ -900,6 +902,21 @@ smp_spinloop(); } +static void purge_private_objs_from_old_objects_to_trace() +{ + struct tx_descriptor *d = thread_descriptor; + int i, size = d->old_objects_to_trace.size; + gcptr *items = d->old_objects_to_trace.items; + + for(i = 0; i < size; i++) { + if (items[i] && items[i]->h_revision == stm_private_rev_num) { + /* private objects from the same aborting transaction */ + items[i] = NULL; + dprintf(("purge old private object %p\n", items[i])); + } + } +} + void stm_abort_and_retry(void) { AbortTransaction(ABRT_MANUAL); @@ -990,6 +1007,12 @@ stm_thread_local_obj = d->old_thread_local_obj; d->old_thread_local_obj = NULL; + /* remove old private objects from old_objects_to_trace + because they never have to be traced (also because + weakrefs are kept alive even when their target is not + and stm_move_young_weakrefs doesn't handle that). 
*/ + purge_private_objs_from_old_objects_to_trace(); + // notifies the CPU that we're potentially in a spin loop SpinLoop(SPLP_ABORT); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -408,7 +408,8 @@ { while (gcptrlist_size(&d->old_objects_to_trace) > 0) { gcptr obj = gcptrlist_pop(&d->old_objects_to_trace); - + if (!obj) + continue; assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); From noreply at buildbot.pypy.org Thu Nov 21 16:05:09 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 21 Nov 2013 16:05:09 +0100 (CET) Subject: [pypy-commit] stmgc default: fix bug in demo_random.c Message-ID: <20131121150509.CED361C31EA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r554:b820cff55e7a Date: 2013-11-21 16:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/b820cff55e7a/ Log: fix bug in demo_random.c diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -566,6 +566,7 @@ check((gcptr)ww->node); } else { + t = (nodeptr)write_barrier(ptrs[i]); t->weakref = NULL; } } From noreply at buildbot.pypy.org Thu Nov 21 16:07:36 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 21 Nov 2013 16:07:36 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: import stmgc with various fixes Message-ID: <20131121150736.48D741C31E9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68265:578f115a3f4f Date: 2013-11-21 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/578f115a3f4f/ Log: import stmgc with various fixes diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -739,9 +739,11 @@ W = LocalizePublic(d, R); assert(is_private(W)); - if (W->h_tid & GCFLAG_OLD) + if (W->h_tid & GCFLAG_OLD) { + /* XXX: probably unnecessary as it is done in allocate_next_section + already */ 
gcptrlist_insert(&d->old_objects_to_trace, W); - else + } else gcptrlist_insert(&d->public_with_young_copy, R); } else @@ -901,6 +903,21 @@ smp_spinloop(); } +static void purge_private_objs_from_old_objects_to_trace() +{ + struct tx_descriptor *d = thread_descriptor; + int i, size = d->old_objects_to_trace.size; + gcptr *items = d->old_objects_to_trace.items; + + for(i = 0; i < size; i++) { + if (items[i] && items[i]->h_revision == stm_private_rev_num) { + /* private objects from the same aborting transaction */ + items[i] = NULL; + dprintf(("purge old private object %p\n", items[i])); + } + } +} + void stm_abort_and_retry(void) { AbortTransaction(ABRT_MANUAL); @@ -991,6 +1008,12 @@ stm_thread_local_obj = d->old_thread_local_obj; d->old_thread_local_obj = NULL; + /* remove old private objects from old_objects_to_trace + because they never have to be traced (also because + weakrefs are kept alive even when their target is not + and stm_move_young_weakrefs doesn't handle that). */ + purge_private_objs_from_old_objects_to_trace(); + // notifies the CPU that we're potentially in a spin loop SpinLoop(SPLP_ABORT); @@ -1746,6 +1769,8 @@ /* there may be a thread holding the collection lock because it steals a stub belonging to the thread that previously owned this descriptor. 
+ (not currently, as we do a start_exclusivelock() + before calling DescriptorInit) */ } else { @@ -1810,6 +1835,10 @@ gcptrlist_delete(&d->public_descriptor->stolen_objects); gcptrlist_delete(&d->public_descriptor->stolen_young_stubs); + assert(d->young_weakrefs.size == 0); + assert(d->public_with_young_copy.size == 0); + assert(d->old_objects_to_trace.size == 0); + stmgcpage_done_tls(); i = d->public_descriptor_index; assert(stm_descriptor_array[i] == d->public_descriptor); diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -141,7 +141,8 @@ #define SPLP_LOCKED_INFLIGHT 1 #define SPLP_LOCKED_VALIDATE 2 #define SPLP_LOCKED_COMMIT 3 -#define SPINLOOP_REASONS 4 +#define SPLP_LOCKED_COLLECT 4 +#define SPINLOOP_REASONS 5 /* this struct contains thread-local data that may be occasionally * accessed by a foreign thread and that must stay around after the diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -56,6 +56,9 @@ || *d->nursery_current_ref == d->nursery_end); stm_free(d->nursery_base); + assert(d->public_with_young_copy.size == 0); + assert(d->old_objects_to_trace.size == 0); + assert(d->young_weakrefs.size == 0); gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_with_young_copy); gcptrlist_delete(&d->young_weakrefs); @@ -279,7 +282,19 @@ static void trace_stub(struct tx_descriptor *d, gcptr S) { - revision_t w = ACCESS_ONCE(S->h_revision); + /* ignore stub if it is outdated, because then the transaction + will abort (or has been aborted long ago) */ + + revision_t w; + + retry: + w = ACCESS_ONCE(S->h_revision); + if (!IS_POINTER(w) && w >= LOCKED) { + /* check again when unlocked */ + SpinLoop(SPLP_LOCKED_COLLECT); + goto retry; + } + if ((w & 3) != 2) { /* P has a ptr in 
h_revision, but this object is not a stub with a protected pointer. It has likely been the case @@ -333,6 +348,7 @@ */ long i, size = d->public_with_young_copy.size; gcptr *items = d->public_with_young_copy.items; + revision_t v; for (i = 0; i < size; i++) { gcptr P = items[i]; @@ -340,7 +356,7 @@ assert(P->h_tid & GCFLAG_OLD); assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - revision_t v = ACCESS_ONCE(P->h_revision); + v = ACCESS_ONCE(P->h_revision); wlog_t *item; G2L_FIND(d->public_to_private, P, item, goto not_in_public_to_private); @@ -361,10 +377,19 @@ item->addr, item->val)); assert(_stm_is_private(item->val)); visit_if_young(&item->val); + assert(item->val->h_tid & GCFLAG_OLD); continue; not_in_public_to_private: + /* re-read because of possible spinloop */ + v = ACCESS_ONCE(P->h_revision); + if (!IS_POINTER(v)) { + if (v >= LOCKED) { + /* check again when unlocked */ + SpinLoop(SPLP_LOCKED_COLLECT); + goto not_in_public_to_private; + } /* P is neither a key in public_to_private nor outdated. It must come from an older transaction that aborted. Nothing to do now. 
@@ -384,7 +409,8 @@ { while (gcptrlist_size(&d->old_objects_to_trace) > 0) { gcptr obj = gcptrlist_pop(&d->old_objects_to_trace); - + if (!obj) + continue; assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -b89b61f0df98 +b820cff55e7a diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -257,8 +257,11 @@ memset(&sd.all_stubs, 0, sizeof(sd.all_stubs)); steal_data = &sd; stmgc_trace(L, &replace_ptr_to_protected_with_stub); - if (L->h_tid & GCFLAG_WEAKREF) + if (L->h_tid & GCFLAG_WEAKREF) { + /* We have to trace the weakref manually because stmgc_trace + doesn't */ replace_ptr_to_protected_with_stub(WEAKREF_PTR(L, stmgc_size(L))); + } g2l_delete_not_used_any_more(&sd.all_stubs); /* If another thread (the foreign or a 3rd party) does a read diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -89,6 +89,7 @@ XXX: remove again when sure it is not needed (interaction with stop_all_other_threads()) */ start_exclusivelock(); + assert(stm_active == 0); stmgcpage_acquire_global_lock(); #ifdef STM_BARRIER_COUNT static int seen = 0; @@ -117,6 +118,7 @@ if (token == 1) { start_exclusivelock(); + assert(stm_active == 0); stmgcpage_acquire_global_lock(); done_shadowstack(); stmgc_done_nursery(); @@ -385,6 +387,14 @@ ACCESS_ONCE(sync_required) = -1; stm_stop_sharedlock(); start_exclusivelock(); + if (stm_active < 0) { + /* we have to give up and abort. 
Another thread did + a major collect and makes us abort now */ + stop_exclusivelock(); + stm_start_sharedlock(); + assert(stm_active < 0); + AbortNowIfDelayed(); + } ACCESS_ONCE(sync_required) = 0; assert(in_single_thread == NULL); @@ -401,6 +411,10 @@ stop_exclusivelock(); stm_start_sharedlock(); + + /* another thread may commit, start a major collect, and + make us abort */ + AbortNowIfDelayed(); } void stm_possible_safe_point(void) diff --git a/rpython/translator/stm/src_stm/weakref.c b/rpython/translator/stm/src_stm/weakref.c --- a/rpython/translator/stm/src_stm/weakref.c +++ b/rpython/translator/stm/src_stm/weakref.c @@ -40,10 +40,13 @@ if (stmgc_is_in_nursery(d, pointing_to)) { if (pointing_to->h_tid & GCFLAG_MOVED) { + gcptr to = (gcptr)pointing_to->h_revision; dprintf(("weakref ptr moved %p->%p\n", - *WEAKREF_PTR(weakref, size), - (gcptr)pointing_to->h_revision)); - *WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision; + *WEAKREF_PTR(weakref, size), to)); + *WEAKREF_PTR(weakref, size) = to; + assert(to->h_tid & GCFLAG_OLD); + assert(!(to->h_tid & GCFLAG_MOVED)); + assert(!(pointing_to->h_tid & GCFLAG_OLD)); } else { assert(!IS_POINTER(pointing_to->h_revision)); From noreply at buildbot.pypy.org Thu Nov 21 19:10:31 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 21 Nov 2013 19:10:31 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for SSLSocket.makefile() Message-ID: <20131121181031.C357B1C0144@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68266:963c6d6d7d6c Date: 2013-11-21 10:09 -0800 http://bitbucket.org/pypy/pypy/changeset/963c6d6d7d6c/ Log: Fix for SSLSocket.makefile() Test program: pypy -c "import socket, ssl; s = ssl.wrap_socket(socket.create_connection(('pypi.python.org', 443))); s.makefile().close(); print s.fileno()" diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if 
s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: From noreply at buildbot.pypy.org Thu Nov 21 23:51:55 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:51:55 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: wip Message-ID: <20131121225155.02C2F1C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68267:65b52ef529c0 Date: 2013-11-19 00:24 +0200 http://bitbucket.org/pypy/pypy/changeset/65b52ef529c0/ Log: wip diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -36,7 +36,11 @@ return backstrides def getitem(self, index): - return self.dtype.itemtype.read(self, index, 0) + from pypy.module.micronumpy.types import VoidType + it = self.dtype.itemtype + if isinstance(it, VoidType): + return it.readarray(self, index, 0, self.dtype) + return it.read(self, index, 0) def getitem_bool(self, index): return self.dtype.itemtype.read_bool(self, index, 0) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -429,7 +429,13 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): # dt1.num should be <= dt2.num - if dt1.num > dt2.num: + dt1num = dt1.num + dt2num = dt2.num + if dt1num == 20: #void type + dt1num = dt1.subdtype.num + if dt2num == 20: + dt2num = dt2.subdtype.num + if dt1num > dt2num: dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == NPY_GENBOOLLTR): @@ -437,11 +443,11 @@ # Everything numeric promotes to complex if dt2.is_complex_type() or dt1.is_complex_type(): - if dt2.num == NPY_CFLOAT: + if dt2num == NPY_CFLOAT: return 
interp_dtype.get_dtype_cache(space).w_complex64dtype - elif dt2.num == NPY_CDOUBLE: + elif dt2num == NPY_CDOUBLE: return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt2.num == NPY_CLONGDOUBLE: + elif dt2num == NPY_CLONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -455,7 +461,7 @@ # Everything promotes to float, and bool promotes to everything. if dt2.kind == NPY_FLOATINGLTR or dt1.kind == NPY_GENBOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: + if dt2num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 @@ -465,8 +471,8 @@ if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes - dtypenum = dt2.num + 2 - elif dt2.num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY_ULONG): + dtypenum = dt2num + 2 + elif dt2num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2num == NPY_ULONG): # UInt64 + signed = Float64 dtypenum = NPY_DOUBLE elif dt2.is_flexible_type(): @@ -481,7 +487,7 @@ return dt2 else: # increase to the next signed type - dtypenum = dt2.num + 1 + dtypenum = dt2num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3068,14 +3068,18 @@ ([4,3], [[7.,5.],[7.,5.]], 9), ] h = np.array(buf, dtype=descr) + print '3' assert len(h) == 2 - skip('broken') # XXX for v in (h, h[0], h['x']): + print '2',type(v),v repr(v) # check for crash in repr + print '1' assert (h['x'] == np.array([buf[0][0], buf[1][0]], dtype='i4')).all() + print '4' assert (h['y'] == 
np.array([buf[0][1], buf[1][1]], dtype='f8')).all() + print '5' assert (h['z'] == np.array([buf[0][2], buf[1][2]], dtype='u1')).all() From noreply at buildbot.pypy.org Thu Nov 21 23:51:56 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:51:56 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: fix multirow record array access Message-ID: <20131121225156.2F8F01C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68268:5aaafb1df4d8 Date: 2013-11-21 23:52 +0200 http://bitbucket.org/pypy/pypy/changeset/5aaafb1df4d8/ Log: fix multirow record array access diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -61,10 +61,24 @@ def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] - # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), - arr.get_backstrides(), - arr.shape, arr, orig_arr, subdtype) + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, + subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = arr.get_strides()[:] + final_strides += strides + final_backstrides = arr.get_backstrides()[:] + final_backstrides += backstrides + final_dtype = subdtype + print self.name,'strides',arr.get_strides(),strides + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) class Chunks(BaseChunk): def __init__(self, l): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py 
--- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3069,7 +3069,9 @@ ] h = np.array(buf, dtype=descr) assert len(h) == 2 - skip('broken') # XXX + assert h['x'].shape == (2, 2) + assert h['y'].strides == (41, 16, 8) + assert h['z'].shape == (2,) for v in (h, h[0], h['x']): repr(v) # check for crash in repr assert (h['x'] == np.array([buf[0][0], @@ -3107,9 +3109,10 @@ [[7, 8, 9], [10, 11, 12]]])], dtype=dt) s = str(a) - assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " \ + assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " "[[7, 8, 9], [10, 11, 12]]])]") + def test_issue_1589(self): import numpypy as numpy c = numpy.array([[(1, 2, 'a'), (3, 4, 'b')], [(5, 6, 'c'), (7, 8, 'd')]], diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1789,24 +1789,16 @@ dtype.subdtype) return W_NDimArray(implementation) - def str_format(self, val): - # only called with the results of readarray() - from pypy.module.micronumpy.base import W_NDimArray - assert isinstance(val, W_NDimArray) - i = val.create_iter() - first = True - dtype = val.get_dtype() - s = StringBuilder() - s.append('[') - while not i.done(): - if first: - first = False - else: - s.append(', ') - s.append(dtype.itemtype.str_format(i.getitem())) - i.next() - s.append(']') - return s.build() + def read(self, arr, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + arr = self.readarray(box.arr, box.ofs, 0, box.dtype) + return arr.dump_data(prefix='', suffix='') class RecordType(FlexibleType): T = lltype.Char @@ -1867,10 +1859,7 @@ first = False else: pieces.append(", ") - if isinstance(tp, VoidType): - val = tp.readarray(box.arr, box.ofs, ofs, subdtype) - else: - val = 
tp.read(box.arr, box.ofs, ofs, subdtype) + val = tp.read(box.arr, box.ofs, ofs, subdtype) pieces.append(tp.str_format(val)) pieces.append(")") return "".join(pieces) From noreply at buildbot.pypy.org Thu Nov 21 23:51:57 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:51:57 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: extend dump_data Message-ID: <20131121225157.5CE1D1C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68269:e16a81d977f5 Date: 2013-11-21 23:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e16a81d977f5/ Log: extend dump_data diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -243,12 +243,13 @@ return space.wrap(self.dump_data()) return space.call_function(cache.w_array_str, self) - def dump_data(self): + def dump_data(self, prefix='array(', suffix=')'): i = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() - s.append('array([') + s.append(prefix) + s.append('[') while not i.done(): if first: first = False @@ -256,7 +257,8 @@ s.append(', ') s.append(dtype.itemtype.str_format(i.getitem())) i.next() - s.append('])') + s.append(']') + s.append(suffix) return s.build() def create_iter(self, shape=None, backward_broadcast=False, require_index=False): From noreply at buildbot.pypy.org Thu Nov 21 23:51:58 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:51:58 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: skip until we can call app-level array printing inside types' str_format method Message-ID: <20131121225158.810551C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68270:4d51ede78b58 Date: 2013-11-22 00:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4d51ede78b58/ Log: skip until we can call app-level array printing 
inside types' str_format method diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3109,6 +3109,7 @@ [[7, 8, 9], [10, 11, 12]]])], dtype=dt) s = str(a) + skip('incorrect formatting via dump_data') assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " "[[7, 8, 9], [10, 11, 12]]])]") From noreply at buildbot.pypy.org Thu Nov 21 23:51:59 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:51:59 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: merge heads Message-ID: <20131121225159.AB9241C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68271:7c21f9756300 Date: 2013-11-22 00:09 +0200 http://bitbucket.org/pypy/pypy/changeset/7c21f9756300/ Log: merge heads diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -36,7 +36,11 @@ return backstrides def getitem(self, index): - return self.dtype.itemtype.read(self, index, 0) + from pypy.module.micronumpy.types import VoidType + it = self.dtype.itemtype + if isinstance(it, VoidType): + return it.readarray(self, index, 0, self.dtype) + return it.read(self, index, 0) def getitem_bool(self, index): return self.dtype.itemtype.read_bool(self, index, 0) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -429,7 +429,13 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): # dt1.num should be <= dt2.num - if dt1.num > dt2.num: + dt1num = dt1.num + dt2num = dt2.num + if dt1num == 20: #void type + dt1num = dt1.subdtype.num + if dt2num == 20: + dt2num = 
dt2.subdtype.num + if dt1num > dt2num: dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == NPY_GENBOOLLTR): @@ -437,11 +443,11 @@ # Everything numeric promotes to complex if dt2.is_complex_type() or dt1.is_complex_type(): - if dt2.num == NPY_CFLOAT: + if dt2num == NPY_CFLOAT: return interp_dtype.get_dtype_cache(space).w_complex64dtype - elif dt2.num == NPY_CDOUBLE: + elif dt2num == NPY_CDOUBLE: return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt2.num == NPY_CLONGDOUBLE: + elif dt2num == NPY_CLONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -455,7 +461,7 @@ # Everything promotes to float, and bool promotes to everything. if dt2.kind == NPY_FLOATINGLTR or dt1.kind == NPY_GENBOOLLTR: # Float32 + 8-bit int = Float64 - if dt2.num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: + if dt2num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 @@ -465,8 +471,8 @@ if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes - dtypenum = dt2.num + 2 - elif dt2.num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY_ULONG): + dtypenum = dt2num + 2 + elif dt2num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2num == NPY_ULONG): # UInt64 + signed = Float64 dtypenum = NPY_DOUBLE elif dt2.is_flexible_type(): @@ -481,7 +487,7 @@ return dt2 else: # increase to the next signed type - dtypenum = dt2.num + 1 + dtypenum = dt2num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or From noreply at buildbot.pypy.org Thu Nov 21 23:52:00 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:52:00 +0100 (CET) Subject: [pypy-commit] 
pypy voidtype_strformat: fix translation Message-ID: <20131121225200.D1C631C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68272:dcc96dc918b2 Date: 2013-11-22 00:31 +0200 http://bitbucket.org/pypy/pypy/changeset/dcc96dc918b2/ Log: fix translation diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -68,10 +68,8 @@ strides, backstrides = calc_strides(subdtype.shape, subdtype.subdtype, arr.order) final_shape = arr.shape + subdtype.shape - final_strides = arr.get_strides()[:] - final_strides += strides - final_backstrides = arr.get_backstrides()[:] - final_backstrides += backstrides + final_strides = arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides final_dtype = subdtype print self.name,'strides',arr.get_strides(),strides if subdtype.subdtype: From noreply at buildbot.pypy.org Thu Nov 21 23:52:01 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 21 Nov 2013 23:52:01 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: fix merge Message-ID: <20131121225201.E1BF61C31F8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68273:1ac497fd9a90 Date: 2013-11-22 00:50 +0200 http://bitbucket.org/pypy/pypy/changeset/1ac497fd9a90/ Log: fix merge diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -36,11 +36,7 @@ return backstrides def getitem(self, index): - from pypy.module.micronumpy.types import VoidType - it = self.dtype.itemtype - if isinstance(it, VoidType): - return it.readarray(self, index, 0, self.dtype) - return it.read(self, index, 0) + return self.dtype.itemtype.read(self, index, 0) def getitem_bool(self, index): return self.dtype.itemtype.read_bool(self, index, 0) diff --git 
a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -429,13 +429,7 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): # dt1.num should be <= dt2.num - dt1num = dt1.num - dt2num = dt2.num - if dt1num == 20: #void type - dt1num = dt1.subdtype.num - if dt2num == 20: - dt2num = dt2.subdtype.num - if dt1num > dt2num: + if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == NPY_GENBOOLLTR): @@ -443,11 +437,11 @@ # Everything numeric promotes to complex if dt2.is_complex_type() or dt1.is_complex_type(): - if dt2num == NPY_CFLOAT: + if dt2.num == NPY_CFLOAT: return interp_dtype.get_dtype_cache(space).w_complex64dtype - elif dt2num == NPY_CDOUBLE: + elif dt2.num == NPY_CDOUBLE: return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt2num == NPY_CLONGDOUBLE: + elif dt2.num == NPY_CLONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -461,7 +455,7 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == NPY_FLOATINGLTR or dt1.kind == NPY_GENBOOLLTR: # Float32 + 8-bit int = Float64 - if dt2num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: + if dt2.num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 @@ -471,8 +465,8 @@ if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes - dtypenum = dt2num + 2 - elif dt2num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2num == NPY_ULONG): + dtypenum = dt2.num + 2 + elif dt2.num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY_ULONG): # UInt64 + signed = Float64 dtypenum = NPY_DOUBLE elif dt2.is_flexible_type(): @@ -487,7 +481,7 @@ return dt2 else: # increase to the next signed type - dtypenum = dt2num + 1 + dtypenum = dt2.num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or From noreply at buildbot.pypy.org Fri Nov 22 00:02:39 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 22 Nov 2013 00:02:39 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: remove nextra argument from rawshape() Message-ID: <20131121230239.89F1E1C3225@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68274:9f4d8f565ab1 Date: 2013-11-19 08:25 +0000 http://bitbucket.org/pypy/pypy/changeset/9f4d8f565ab1/ Log: remove nextra argument from rawshape() diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -175,15 +175,15 @@ data_w.append(self.w_stararg) return (shape_cnt, shape_keys, shape_star), data_w - def _rawshape(self, nextra=0): - shape_cnt = len(self.arguments_w) + nextra # Number of positional args + def _rawshape(self): + shape_cnt = len(self.arguments_w) shape_keys = tuple(sorted(self.keywords)) shape_star = self.w_stararg is not None # Flag: 
presence of *arg return shape_cnt, shape_keys, shape_star -def rawshape(args, nextra=0): - return args._rawshape(nextra) +def rawshape(args): + return args._rawshape() # diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -879,11 +879,12 @@ self.name, flags) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): # we are computing call families and call tables that always contain @@ -1039,11 +1040,12 @@ args = args.prepend(s_self) return self.funcdesc.pycall(schedule, args, s_previous_result, op) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): return self.funcdesc diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -67,9 +67,6 @@ args = MockArgs([1, 2, 3]) assert rawshape(args) == (3, (), False) - args = MockArgs([1]) - assert rawshape(args, 2) == (3, (), False) - args = MockArgs([1, 2, 3, 4, 5]) assert rawshape(args) == (5, (), False) From noreply at buildbot.pypy.org Fri Nov 22 00:02:40 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 22 Nov 2013 00:02:40 +0100 (CET) Subject: [pypy-commit] pypy 
less-stringly-ops: Move ._rawshape() to CallSpec Message-ID: <20131121230240.AA0D21C3225@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68275:f62d937bb4ec Date: 2013-11-19 08:41 +0000 http://bitbucket.org/pypy/pypy/changeset/f62d937bb4ec/ Log: Move ._rawshape() to CallSpec diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -167,20 +167,6 @@ return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])), w_star) - def flatten(self): - """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt, shape_keys, shape_star = self._rawshape() - data_w = self.arguments_w + [self.keywords[key] for key in shape_keys] - if shape_star: - data_w.append(self.w_stararg) - return (shape_cnt, shape_keys, shape_star), data_w - - def _rawshape(self): - shape_cnt = len(self.arguments_w) - shape_keys = tuple(sorted(self.keywords)) - shape_star = self.w_stararg is not None # Flag: presence of *arg - return shape_cnt, shape_keys, shape_star - def rawshape(args): return args._rawshape() diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import py from rpython.annotator.argument import ArgumentsForTranslation, rawshape -from rpython.flowspace.argument import Signature +from rpython.flowspace.argument import Signature, CallSpec class MockArgs(ArgumentsForTranslation): def newtuple(self, items): @@ -82,28 +82,6 @@ args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) assert rawshape(args) == (5, ('d', 'e'), False) - def test_flatten(self): - args = MockArgs([1, 2, 3]) - assert args.flatten() == ((3, (), False), [1, 2, 3]) - - args = MockArgs([1]) - assert args.flatten() == ((1, (), False), [1]) - - args = MockArgs([1, 2, 3, 4, 5]) - assert args.flatten() == 
((5, (), False), [1, 2, 3, 4, 5]) - - args = MockArgs([1], {'c': 3, 'b': 2}) - assert args.flatten() == ((1, ('b', 'c'), False), [1, 2, 3]) - - args = MockArgs([1], {'c': 5}) - assert args.flatten() == ((1, ('c', ), False), [1, 5]) - - args = MockArgs([1], {'c': 5, 'd': 7}) - assert args.flatten() == ((1, ('c', 'd'), False), [1, 5, 7]) - - args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) - assert args.flatten() == ((5, ('d', 'e'), False), [1, 2, 3, 4, 5, 7, 5]) - def test_stararg_flowspace_variable(self): var = object() shape = ((2, ('g', ), True), [1, 2, 9, var]) diff --git a/rpython/flowspace/argument.py b/rpython/flowspace/argument.py --- a/rpython/flowspace/argument.py +++ b/rpython/flowspace/argument.py @@ -93,14 +93,18 @@ def flatten(self): """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt = len(self.arguments_w) # Number of positional args - shape_keys = tuple(sorted(self.keywords)) - shape_star = self.w_stararg is not None # Flag: presence of *arg + shape_cnt, shape_keys, shape_star = self._rawshape() data_w = self.arguments_w + [self.keywords[key] for key in shape_keys] if shape_star: data_w.append(self.w_stararg) return (shape_cnt, shape_keys, shape_star), data_w + def _rawshape(self): + shape_cnt = len(self.arguments_w) + shape_keys = tuple(sorted(self.keywords)) + shape_star = self.w_stararg is not None # Flag: presence of *arg + return shape_cnt, shape_keys, shape_star + def as_list(self): assert not self.keywords if self.w_stararg is None: diff --git a/rpython/flowspace/test/test_argument.py b/rpython/flowspace/test/test_argument.py --- a/rpython/flowspace/test/test_argument.py +++ b/rpython/flowspace/test/test_argument.py @@ -1,6 +1,4 @@ -# -*- coding: utf-8 -*- -import py -from rpython.flowspace.argument import Signature +from rpython.flowspace.argument import Signature, CallSpec class TestSignature(object): @@ -49,3 +47,27 @@ assert x == ["a", "b", "c"] assert y == "d" assert z == "e" + + +def 
test_flatten_CallSpec(): + args = CallSpec([1, 2, 3]) + assert args.flatten() == ((3, (), False), [1, 2, 3]) + + args = CallSpec([1]) + assert args.flatten() == ((1, (), False), [1]) + + args = CallSpec([1, 2, 3, 4, 5]) + assert args.flatten() == ((5, (), False), [1, 2, 3, 4, 5]) + + args = CallSpec([1], {'c': 3, 'b': 2}) + assert args.flatten() == ((1, ('b', 'c'), False), [1, 2, 3]) + + args = CallSpec([1], {'c': 5}) + assert args.flatten() == ((1, ('c', ), False), [1, 5]) + + args = CallSpec([1], {'c': 5, 'd': 7}) + assert args.flatten() == ((1, ('c', 'd'), False), [1, 5, 7]) + + args = CallSpec([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) + assert args.flatten() == ((5, ('d', 'e'), False), [1, 2, 3, 4, 5, 7, 5]) + From noreply at buildbot.pypy.org Fri Nov 22 00:02:41 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 22 Nov 2013 00:02:41 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: Update tests to use op.* instead of raw SpaceOperations Message-ID: <20131121230241.D4E891C3225@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68276:8bfdeae1f646 Date: 2013-11-21 23:31 +0100 http://bitbucket.org/pypy/pypy/changeset/8bfdeae1f646/ Log: Update tests to use op.* instead of raw SpaceOperations diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -15,6 +15,7 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import objectmodel from rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.operation import op from rpython.translator.test import snippet @@ -69,12 +70,11 @@ return x+1 """ x = Variable("x") - result = Variable("result") - op = SpaceOperation("add", [x, Constant(1)], result) + oper = op.add(x, Constant(1)) block = Block([x]) fun = FunctionGraph("f", block) - block.operations.append(op) - block.closeblock(Link([result], 
fun.returnblock)) + block.operations.append(oper) + block.closeblock(Link([oper.result], fun.returnblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) a.complete() @@ -90,20 +90,18 @@ """ i1 = Variable("i1") i2 = Variable("i2") - i3 = Variable("i3") - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) - decop = SpaceOperation("add", [i2, Constant(-1)], i3) + conditionop = op.gt(i1, Constant(0)) + decop = op.add(i2, Constant(-1)) headerblock = Block([i1]) whileblock = Block([i2]) fun = FunctionGraph("f", headerblock) headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres + headerblock.exitswitch = conditionop.result headerblock.closeblock(Link([i1], fun.returnblock, False), Link([i1], whileblock, True)) whileblock.operations.append(decop) - whileblock.closeblock(Link([i3], headerblock)) + whileblock.closeblock(Link([decop.result], headerblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) @@ -123,15 +121,12 @@ i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") - i4 = Variable("i4") sum2 = Variable("sum2") sum3 = Variable("sum3") - sum4 = Variable("sum4") - - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i2, Constant(0)], conditionres) - decop = SpaceOperation("add", [i3, Constant(-1)], i4) - addop = SpaceOperation("add", [i3, sum3], sum4) + + conditionop = op.gt(i2, Constant(0)) + decop = op.add(i3, Constant(-1)) + addop = op.add(i3, sum3) startblock = Block([i1]) headerblock = Block([i2, sum2]) whileblock = Block([i3, sum3]) @@ -139,12 +134,12 @@ fun = FunctionGraph("f", startblock) startblock.closeblock(Link([i1, Constant(0)], headerblock)) headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres + headerblock.exitswitch = conditionop.result headerblock.closeblock(Link([sum2], fun.returnblock, False), Link([i2, sum2], 
whileblock, True)) whileblock.operations.append(addop) whileblock.operations.append(decop) - whileblock.closeblock(Link([i4, sum4], headerblock)) + whileblock.closeblock(Link([decop.result, addop.result], headerblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) From noreply at buildbot.pypy.org Fri Nov 22 00:02:43 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 22 Nov 2013 00:02:43 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: do the dispatch of consider_op() inside a method of SpaceOperation Message-ID: <20131121230243.016591C3225@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68277:2b2a6f11274e Date: 2013-11-21 23:33 +0100 http://bitbucket.org/pypy/pypy/changeset/2b2a6f11274e/ Log: do the dispatch of consider_op() inside a method of SpaceOperation diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -584,10 +584,6 @@ def consider_op(self, block, opindex): op = block.operations[opindex] argcells = [self.binding(a) for a in op.args] - consider_meth = getattr(self,'consider_op_'+op.opname, - None) - if not consider_meth: - raise Exception,"unknown op: %r" % op # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the @@ -599,7 +595,7 @@ if isinstance(arg, annmodel.SomeImpossibleValue): raise BlockedInference(self, op, opindex) try: - resultcell = consider_meth(*argcells) + resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] e.source = gather_error(self, graph, block, opindex) diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -439,6 +439,12 @@ newresult = mapping.get(self.result, self.result) 
return type(self)(self.opname, newargs, newresult, self.offset) + def consider(self, annotator, *argcells): + consider_meth = getattr(annotator, 'consider_op_' + self.opname, None) + if not consider_meth: + raise Exception("unknown op: %r" % op) + return consider_meth(*argcells) + class Atom(object): def __init__(self, name): self.__name__ = name # make save_global happy From noreply at buildbot.pypy.org Fri Nov 22 00:02:44 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 22 Nov 2013 00:02:44 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: fix translation of open() and os.tmpfile() Message-ID: <20131121230244.298CD1C3225@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68278:8c558fb814b7 Date: 2013-11-21 23:58 +0100 http://bitbucket.org/pypy/pypy/changeset/8c558fb814b7/ Log: fix translation of open() and os.tmpfile() diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -47,14 +47,12 @@ @register_flow_sc(open) def sc_open(space, *args_w): from rpython.rlib.rfile import create_file - - return space.frame.do_operation("simple_call", const(create_file), *args_w) + return space.appcall(create_file, *args_w) @register_flow_sc(os.tmpfile) def sc_os_tmpfile(space): from rpython.rlib.rfile import create_temp_rfile - - return space.frame.do_operation("simple_call", const(create_temp_rfile)) + return space.appcall(create_temp_rfile) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs From noreply at buildbot.pypy.org Fri Nov 22 13:07:53 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 22 Nov 2013 13:07:53 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: test, implement 90% of VoidType to_builtin_type Message-ID: <20131122120753.4425D1C0356@cobra.cs.uni-duesseldorf.de> Author: Matti Picus 
Branch: voidtype_strformat Changeset: r68280:61f2c6a378fd Date: 2013-11-22 14:06 +0200 http://bitbucket.org/pypy/pypy/changeset/61f2c6a378fd/ Log: test, implement 90% of VoidType to_builtin_type diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3109,6 +3109,9 @@ [[7, 8, 9], [10, 11, 12]]])], dtype=dt) s = str(a) + i = a.item() + assert isinstance(i, tuple) + assert len(i) == 4 skip('incorrect formatting via dump_data') assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " "[[7, 8, 9], [10, 11, 12]]])]") diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1800,6 +1800,28 @@ arr = self.readarray(box.arr, box.ofs, 0, box.dtype) return arr.dump_data(prefix='', suffix='') + def to_builtin_type(self, space, item): + ''' From the documentation of ndarray.item(): + "Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned." 
+ ''' + dt = item.arr.dtype + tpl =() + for name in dt.fieldnames: + ofs, dtype = dt.fields[name] + if isinstance(dtype.itemtype, VoidType): + read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) + else: + read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) + if isinstance (read_val, interp_boxes.W_StringBox): + # StringType returns a str + read_val = space.wrap(dtype.itemtype.to_str(read_val)) + tpl = tpl + (read_val,) + if len(tpl) == 0: + raise OperationError(space.w_NotImplementedError, space.wrap( + "item() for Void aray with no fields not implemented")) + return space.wrap(tpl) + class RecordType(FlexibleType): T = lltype.Char From noreply at buildbot.pypy.org Fri Nov 22 13:00:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 22 Nov 2013 13:00:24 +0100 (CET) Subject: [pypy-commit] pypy default: fix the annotation of prebuilt ordereddict Message-ID: <20131122120024.1EA9D1C0352@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68279:ffd24a3a9c70 Date: 2013-11-22 12:59 +0100 http://bitbucket.org/pypy/pypy/changeset/ffd24a3a9c70/ Log: fix the annotation of prebuilt ordereddict diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -371,15 +371,19 @@ listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: + if tp is SomeOrderedDict.knowntype: + cls = SomeOrderedDict + else: + cls = SomeDict if need_const: key = Constant(x) try: return self.immutable_cache[key] except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) @@ -412,10 +416,7 @@ 
dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is SomeOrderedDict.knowntype: - result = SomeOrderedDict(dictdef) - else: - result = SomeDict(dictdef) + result = cls(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4148,6 +4148,19 @@ a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) + def test_prebuilt_ordered_dict(self): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please upgrade to python 2.7") + d = OrderedDict([("aa", 1)]) + + def f(): + return d + + a = self.RPythonAnnotator() + assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def g(n): return [0, 1, 2, n] diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -574,6 +574,22 @@ fn = compile(chooser, [bool]) assert fn(True) +def test_ordered_dict(): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please update to Python 2.7") + + expected = [('ea', 1), ('bb', 2), ('c', 3), ('d', 4), ('e', 5), + ('ef', 6)] + d = OrderedDict(expected) + + def f(): + assert d.items() == expected + + fn = compile(f, []) + fn() + def test_inhibit_tail_call(): def foobar_fn(n): return 42 From noreply at buildbot.pypy.org Fri Nov 22 15:04:39 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 22 Nov 2013 15:04:39 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20131122140439.343F71C1352@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68281:8aa12c92ab88 Date: 2013-11-22 14:59 
+0100 http://bitbucket.org/pypy/pypy/changeset/8aa12c92ab88/ Log: hg merge default diff too long, truncating to 2000 out of 31207 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. 
We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = 
self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. + buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + 
self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) 
#self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ 
b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. 
The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git 
a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.7 +Version: 0.8 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.2" -__version_info__ = (0, 7, 2) +__version__ = "0.8" +__version_info__ = (0, 8) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,5 @@ import types +from .lock import allocate_lock try: callable @@ -61,6 +62,7 @@ # rely on it! It's probably not going to work well.) self._backend = backend + self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ @@ -74,7 +76,8 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - self.BVoidP = self._get_cached_btype(model.voidp_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -95,11 +98,12 @@ if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') - self._parser.parse(csource, override=override) - self._cdefsources.append(csource) - if override: - for cache in self._function_caches: - cache.clear() + with self._lock: + self._parser.parse(csource, override=override) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() def dlopen(self, name, flags=0): """Load and return a dynamic 
library identified by 'name'. @@ -109,31 +113,47 @@ library we only look for the actual (untyped) symbols. """ assert isinstance(name, basestring) or name is None - lib, function_cache = _make_ffi_library(self, name, flags) - self._function_caches.append(function_cache) - self._libraries.append(lib) + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) return lib + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: - btype, cfaf = self._parsed_types[cdecl] - if consider_function_as_funcptr and not cfaf: - raise KeyError + result = self._parsed_types[cdecl] except KeyError: - key = cdecl - if not isinstance(cdecl, str): # unicode, on Python 2 - cdecl = cdecl.encode('ascii') - cfaf = consider_function_as_funcptr - type = self._parser.parse_type(cdecl, - consider_function_as_funcptr=cfaf) - btype = self._get_cached_btype(type) - self._parsed_types[key] = btype, cfaf + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the - corresponding Python type: '>. + corresponding object. 
It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): @@ -144,6 +164,10 @@ res = _builtin_function_type(cdecl) if res is not None: return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): @@ -280,14 +304,17 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + with self._lock: + try: + gc_weakrefs = self.gc_weakrefs + except AttributeError: + from .gc_weakref import GcWeakrefs + gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) + return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): + assert self._lock.acquire(False) is False + # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: @@ -320,9 +347,13 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model - return model.pointer_cache(self, ctype) + with self._lock: + return model.pointer_cache(self, ctype) def addressof(self, cdata, field=None): """Return the address of a . @@ -342,10 +373,12 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - self._parser.include(ffi_to_include._parser) - self._cdefsources.append('[') - self._cdefsources.extend(ffi_to_include._cdefsources) - self._cdefsources.append(']') + with ffi_to_include._lock: + with self._lock: + self._parser.include(ffi_to_include._parser) + self._cdefsources.append('[') + self._cdefsources.extend(ffi_to_include._cdefsources) + self._cdefsources.append(']') def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) @@ -372,7 +405,7 @@ backendlib = backend.load_library(path, flags) copied_enums = [] # - def make_accessor(name): + def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] @@ -404,11 +437,17 @@ if enumname not in library.__dict__: library.__dict__[enumname] = enumval copied_enums.append(True) + if name in library.__dict__: + return # - if name in library.__dict__: # copied from an enum value just above, - return # or multithread's race condition raise AttributeError(name) # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + make_accessor_locked(name) + # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) @@ -444,4 +483,5 @@ except (KeyError, AttributeError, TypeError): return None else: - return ffi._get_cached_btype(tp) + with ffi._lock: + return ffi._get_cached_btype(tp) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -142,7 +142,7 @@ if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] if line: - msg = 'cannot parse "%s"\n%s' % (line, msg) + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) @@ -217,19 +217,18 @@ # if decl.name: tp = self._get_type(node, partial_length_ok=True) - if self._is_constant_declaration(node): + if 
self._is_constant_globalvar(node): self._declare('constant ' + decl.name, tp) else: self._declare('variable ' + decl.name, tp) - def parse_type(self, cdecl, consider_function_as_funcptr=False): + def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type, - consider_function_as_funcptr=consider_function_as_funcptr) + return self._get_type(exprnode.type) def _declare(self, name, obj): if name in self._declarations: @@ -249,28 +248,17 @@ return model.ConstPointerType(type) return model.PointerType(type) - def _get_type(self, typenode, convert_array_to_pointer=False, - name=None, partial_length_ok=False, - consider_function_as_funcptr=False): + def _get_type(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): type = self._declarations['typedef ' + typenode.type.names[0]] - if isinstance(type, model.ArrayType): - if convert_array_to_pointer: - return type.item - else: - if (consider_function_as_funcptr and - isinstance(type, model.RawFunctionType)): - return type.as_function_pointer() return type # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type - if convert_array_to_pointer: - return self._get_type_pointer(self._get_type(typenode.type)) if typenode.dim is None: length = None else: @@ -331,10 +319,7 @@ # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - result = self._parse_function_type(typenode, name) - if consider_function_as_funcptr: - result = result.as_function_pointer() - return result + return 
self._parse_function_type(typenode, name) # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): @@ -365,21 +350,24 @@ isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) and list(params[0].type.type.names) == ['void']): del params[0] - args = [self._get_type(argdeclnode.type, - convert_array_to_pointer=True, - consider_function_as_funcptr=True) + args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _is_constant_declaration(self, typenode, const=False): - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - return self._is_constant_declaration(typenode.type) + def _as_func_arg(self, type): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _is_constant_globalvar(self, typenode): if isinstance(typenode, pycparser.c_ast.PtrDecl): - const = 'const' in typenode.quals - return self._is_constant_declaration(typenode.type, const) + return 'const' in typenode.quals if isinstance(typenode, pycparser.c_ast.TypeDecl): - return const or 'const' in typenode.quals + return 'const' in typenode.quals return False def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): @@ -491,7 +479,7 @@ return tp def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructType): + if not isinstance(tp, model.StructOrUnion): raise api.CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) @@ -511,7 +499,7 @@ if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): self._partial_length = True - return None + return '...' 
# raise api.FFIError("unsupported expression: expected a " "simple numeric constant") diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -14,6 +14,6 @@ def build(self, cdata, destructor): # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi.typeof(cdata), cdata) + new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) self.data[ref(new_cdata, self.remove)] = destructor, cdata return new_cdata diff --git a/lib_pypy/cffi/lock.py b/lib_pypy/cffi/lock.py new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,10 @@ import weakref +from .lock import allocate_lock + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -146,6 +149,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . 
import api @@ -192,10 +196,6 @@ _base_pattern = " const *&" _base_pattern_array = "(const *&)" - def build_backend_type(self, ffi, finishlist): - BPtr = PointerType(self.totype).get_cached_btype(ffi, finishlist) - return BPtr - const_voidp_type = ConstPointerType(void_type) @@ -216,10 +216,12 @@ self.item = item self.length = length # - if self.length is None: + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: - brackets = '&[%d]' % self.length + brackets = '&[%d]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) @@ -227,6 +229,10 @@ return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): + if self.length == '...': + from . import api + raise api.CDefError("cannot render the type %r: unknown length" % + (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) @@ -252,6 +258,7 @@ class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = False + partial = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -303,20 +310,21 @@ return # not completing it: it's an opaque struct # self.completed = 1 - fldtypes = tuple(tp.get_cached_btype(ffi, finishlist) - for tp in self.fldtypes) # if self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) ffi._backend.complete_struct_or_union(BType, lst, self) # else: + fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # - if isinstance(ftype, ArrayType) and ftype.length is None: + if isinstance(ftype, ArrayType) and ftype.length == '...': # fix the length to match the total size BItemType = 
ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) @@ -327,18 +335,20 @@ ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) - BArrayType = ftype.get_cached_btype(ffi, finishlist) - fldtypes = (fldtypes[:i] + (BArrayType,) + - fldtypes[i+1:]) - continue # - bitemsize = ffi.sizeof(fldtypes[i]) - if bitemsize != fsize: - self._verification_error( - "field '%s.%s' is declared as %d bytes, but is " - "really %d bytes" % (self.name, - self.fldnames[i] or '{}', - bitemsize, fsize)) + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) @@ -348,11 +358,6 @@ from .ffiplatform import VerificationError raise VerificationError(msg) - -class StructType(StructOrUnion): - kind = 'struct' - partial = False - def check_not_partial(self): if self.partial and self.fixedlayout is None: from . 
import ffiplatform @@ -361,19 +366,18 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - - return global_cache(self, ffi, 'new_struct_type', + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) +class StructType(StructOrUnion): + kind = 'struct' + + class UnionType(StructOrUnion): kind = 'union' - def build_backend_type(self, ffi, finishlist): - finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', - self.get_official_name(), key=self) - class EnumType(StructOrUnionOrEnum): kind = 'enum' @@ -387,6 +391,12 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + def check_not_partial(self): if self.partial and not self.partial_resolved: from . import ffiplatform @@ -444,6 +454,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -464,8 +477,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ 
b/lib_pypy/cffi/vengine_cpy.py @@ -15,7 +15,7 @@ def patch_extension_kwds(self, kwds): pass - def find_module(self, module_name, path, so_suffix): + def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: @@ -25,7 +25,7 @@ # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. - if descr[0] != so_suffix: + if descr[0] not in so_suffixes: return None return filename @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper @@ -280,8 +283,8 @@ return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_deref((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructType): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( @@ -464,11 +467,14 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + 
prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return _cffi_get_struct_layout(nums);') @@ -491,7 +497,7 @@ # function = getattr(module, layoutfuncname) layout = function() - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -528,9 +534,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -566,7 +573,7 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True): + vartp=None, delayed=True, size_too=False): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -597,6 +604,15 @@ '(unsigned long long)(%s));' % (name,)) prnt(' if (o == NULL)') prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') @@ -633,12 +649,23 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 
|| (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %d, ' - 'not %d",') - prnt(' "%s", "%s", (int)%s, %d);' % ( - name, enumerator, enumerator, enumvalue)) + prnt(' "enum %s: %s has the real value %s, ' + 'not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + name, enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return %s;' % self._chained_list_constants[True]) @@ -677,15 +704,16 @@ def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): - self._do_collect_type(tp) + tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) - self._do_collect_type(tp_ptr) + self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) - self._generate_cpy_const(False, name, tp, vartp=tp_ptr) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = (tp.length == '...')) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') @@ -694,11 +722,29 @@ _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the - return # sense that "a=..." is forbidden + # sense that "a=..." 
is forbidden + if tp.length == '...': + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a which we have to replace with + # a if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. - ptr = getattr(library, name) + ptr = value delattr(library, name) def getter(library): return ptr[0] @@ -711,12 +757,9 @@ def _generate_setup_custom(self): prnt = self._prnt - prnt('static PyObject *_cffi_setup_custom(PyObject *lib)') + prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') - prnt(' if (%s < 0)' % self._chained_list_constants[True]) - prnt(' return NULL;') - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') + prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' @@ -834,17 +877,20 @@ static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; -static PyObject *_cffi_setup_custom(PyObject *lib); /* forward */ +static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; + int was_alive = (_cffi_types != NULL); if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); - return _cffi_setup_custom(library); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); } static void _cffi_init(void) diff --git a/lib_pypy/cffi/vengine_gen.py 
b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -20,15 +20,15 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) - def find_module(self, module_name, path, so_suffix): - basename = module_name + so_suffix - if path is None: - path = sys.path - for dirname in path: - filename = os.path.join(dirname, basename) - if os.path.isfile(filename): - return filename - return None + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename def collect_types(self): pass # not needed in the generic engine @@ -173,6 +173,7 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] + base_tp = tp if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] for i, typ in enumerate(tp.args): @@ -186,16 +187,18 @@ wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, typ) + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) - def _make_struct_wrapper(self, oldfunc, i, tp): + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) + newfunc._cffi_base_type = base_tp return newfunc # ---------- @@ -252,11 +255,14 @@ prnt(' static ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, _, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if fbitsize >= 0: continue # 
xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) - prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') @@ -270,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi.typeof("ssize_t(*)(ssize_t)") + BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -279,7 +285,7 @@ if x < 0: break layout.append(x) num += 1 - if isinstance(tp, model.StructType) and tp.partial: + if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] @@ -316,9 +322,10 @@ continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) - BField = ffi._get_cached_btype(ftype) - check(layout[i+1], ffi.sizeof(BField), - "wrong size for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) @@ -379,15 +386,17 @@ def _load_constant(self, is_int, tp, name, module): funcname = '_cffi_const_%s' % name if is_int: - BFunc = self.ffi.typeof("int(*)(long long*)") + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) - p = self.ffi.new("long long*") + p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: - value += (1 << (8*self.ffi.sizeof("long long"))) + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) else: - BFunc = 
self.ffi.typeof(tp.get_c_name('(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() return value @@ -413,11 +422,22 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - prnt(' if (%s != %d) {' % (enumerator, enumvalue)) + if enumvalue < 0: + prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + enumerator, enumerator, enumvalue)) + else: + prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + enumerator, enumerator, enumvalue)) + prnt(' char buf[64];') + prnt(' if ((%s) < 0)' % enumerator) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + enumerator) prnt(' snprintf(out_error, 255,' - '"%s has the real value %d, not %d",') - prnt(' "%s", (int)%s, %d);' % ( - enumerator, enumerator, enumvalue)) + ' "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % ( + enumerator, enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -431,10 +451,11 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BFunc = self.ffi.typeof("int(*)(char*)") + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = '_cffi_e_%s_%s' % (prefix, name) function = module.load_function(BFunc, funcname) - p = self.ffi.new("char[]", 256) + p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): @@ -465,6 +486,14 @@ def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): + if tp.length == '...': + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, 
tp_ptr) else: @@ -476,6 +505,18 @@ def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden + if tp.length == '...': + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise ffiplatform.VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a which we have to replace with @@ -489,7 +530,7 @@ # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name - BFunc = self.ffi.typeof(tp.get_c_name('*(*)(void)', name)) + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -31,7 +31,7 @@ k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) - suffix = _get_so_suffix() + suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) @@ -42,18 +42,21 @@ def write_source(self, file=None): """Write the C source code. 
It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" - if self._has_source and file is None: - raise ffiplatform.VerificationError("source code already written") - self._write_source(file) + with self.ffi._lock: + if self._has_source and file is None: + raise ffiplatform.VerificationError( + "source code already written") + self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" - if self._has_module: - raise ffiplatform.VerificationError("module already compiled") - if not self._has_source: - self._write_source() - self._compile_module() + with self.ffi._lock: + if self._has_module: + raise ffiplatform.VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() def load_library(self): """Get a C module from this Verifier instance. @@ -62,11 +65,14 @@ operations to the C module. If necessary, the C code is written and compiled first. 
""" - if not self._has_module: - self._locate_module() + with self.ffi._lock: if not self._has_module: - self.compile_module() - return self._load_library() + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) @@ -81,7 +87,9 @@ def get_extension(self): if not self._has_source: - self._write_source() + with self.ffi._lock: + if not self._has_source: + self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) @@ -103,7 +111,7 @@ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, - _get_so_suffix()) + _get_so_suffixes()) if filename is None: return self.modulefilename = filename @@ -193,7 +201,7 @@ if keep_so: suffix = '.c' # only remove .c files else: - suffix = _get_so_suffix().lower() + suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): @@ -213,15 +221,20 @@ except OSError: pass -def _get_so_suffix(): +def _get_so_suffixes(): + suffixes = [] for suffix, mode, type in imp.get_suffixes(): if type == imp.C_EXTENSION: - return suffix - # bah, no C_EXTENSION available. Occurs on pypy without cpyext - if sys.platform == 'win32': - return ".pyd" - else: - return ".so" + suffixes.append(suffix) + + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes def _ensure_dir(filename): try: diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -import core -from core import * -import lib -from lib import * - -from __builtin__ import bool, int, long, float, complex, object, unicode, str -from core import abs, max, min - -__version__ = '1.7.0' - -import os -def get_include(): - """ - Return the directory that contains the NumPy \\*.h header files. - - Extension modules that need to compile against NumPy should use this - function to locate the appropriate include directory. - - Notes - ----- - When using ``distutils``, for example in ``setup.py``. - :: - - import numpy as np - ... - Extension('extension_name', ... - include_dirs=[np.get_include()]) - ... - - """ - try: - import numpy - except: - # running from pypy source directory - head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) - return os.path.join(head, '../include') - else: - # using installed numpy core headers - import numpy.core as core - d = os.path.join(os.path.dirname(core.__file__), 'include') - return d - - - -__all__ = ['__version__', 'get_include'] -__all__ += core.__all__ -__all__ += lib.__all__ -#import sys -#sys.modules.setdefault('numpy', sys.modules['numpypy']) - - diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from . import multiarray -from . import umath -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . 
import shape_base -from .shape_base import * - -from .fromnumeric import amax as max, amin as min, \ - round_ as round -from .numeric import absolute as abs - -__all__ = [] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += shape_base.__all__ diff --git a/lib_pypy/numpypy/core/_methods.py b/lib_pypy/numpypy/core/_methods.py deleted file mode 100644 --- a/lib_pypy/numpypy/core/_methods.py +++ /dev/null @@ -1,109 +0,0 @@ -# Array methods which are called by the both the C-code for the method -# and the Python code for the NumPy-namespace function - -import multiarray as mu -import umath as um -from numeric import asanyarray - -def _amax(a, axis=None, out=None, keepdims=False): - return um.maximum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return um.minimum.reduce(a, axis=axis, - out=out, keepdims=keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return um.add.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out, - keepdims=keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(xrange(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - # Upgrade bool, unsigned int, and int to float64 - if dtype is None and arr.dtype.kind in ['b','u','i']: - ret = um.add.reduce(arr, axis=axis, dtype='f8', - out=out, keepdims=keepdims) - else: 
- ret = um.add.reduce(arr, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) From noreply at buildbot.pypy.org Fri Nov 22 15:20:44 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 22 Nov 2013 15:20:44 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: fix translation Message-ID: <20131122142044.64B731C0352@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68282:dfe74941b910 Date: 2013-11-22 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/dfe74941b910/ Log: fix translation diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1805,8 +1805,9 @@ "Void arrays return a buffer object for item(), unless fields are defined, in which case a tuple is returned." ''' + assert isinstance(item, interp_boxes.W_VoidBox) dt = item.arr.dtype - tpl =() + ret_unwrapped = [] for name in dt.fieldnames: ofs, dtype = dt.fields[name] if isinstance(dtype.itemtype, VoidType): @@ -1816,11 +1817,11 @@ if isinstance (read_val, interp_boxes.W_StringBox): # StringType returns a str read_val = space.wrap(dtype.itemtype.to_str(read_val)) - tpl = tpl + (read_val,) - if len(tpl) == 0: + ret_unwrapped = ret_unwrapped + [read_val,] + if len(ret_unwrapped) == 0: raise OperationError(space.w_NotImplementedError, space.wrap( "item() for Void aray with no fields not implemented")) - return space.wrap(tpl) + return space.newtuple(ret_unwrapped) class RecordType(FlexibleType): T = lltype.Char From noreply at buildbot.pypy.org Fri Nov 22 19:00:29 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 22 Nov 2013 19:00:29 +0100 (CET) Subject: [pypy-commit] pypy default: add a failing test Message-ID: <20131122180029.5515F1C0356@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68283:a3c3b75a7f2b Date: 2013-11-22 18:58 +0100 http://bitbucket.org/pypy/pypy/changeset/a3c3b75a7f2b/ Log: add a failing test diff --git 
a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,20 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + skip('fixme') + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + From noreply at buildbot.pypy.org Sat Nov 23 12:30:04 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 23 Nov 2013 12:30:04 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: use op.* instead of SpaceOperation in flatten_star_args() Message-ID: <20131123113004.9C0521C156C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68284:0339d8e5180b Date: 2013-11-23 12:27 +0100 http://bitbucket.org/pypy/pypy/changeset/0339d8e5180b/ Log: use op.* instead of SpaceOperation in flatten_star_args() diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -5,6 +5,7 @@ from rpython.tool.algo.unionfind import UnionFind from rpython.flowspace.model import Block, Link, Variable, SpaceOperation from rpython.flowspace.model import checkgraph +from rpython.flowspace.operation import op from rpython.annotator import model as annmodel from rpython.flowspace.argument import Signature @@ -33,7 +34,8 @@ argscopy = [Variable(v) for v in graph.getargs()] starargs = [Variable('stararg%d'%i) for i in range(nb_extra_args)] newstartblock = Block(argscopy[:-1] + starargs) - newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) + newtup = 
op.newtuple(*starargs) + newtup.result = argscopy[-1] newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) graph.startblock = newstartblock From noreply at buildbot.pypy.org Sat Nov 23 12:37:27 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 23 Nov 2013 12:37:27 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: kill unnecessary generator_mark SpaceOperation Message-ID: <20131123113727.3DC131C156C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68285:11e46d3b0aeb Date: 2013-11-23 12:36 +0100 http://bitbucket.org/pypy/pypy/changeset/11e46d3b0aeb/ Log: kill unnecessary generator_mark SpaceOperation diff --git a/rpython/flowspace/generator.py b/rpython/flowspace/generator.py --- a/rpython/flowspace/generator.py +++ b/rpython/flowspace/generator.py @@ -98,10 +98,6 @@ # First, always run simplify_graph in order to reduce the number of # variables passed around simplify_graph(graph) - # - assert graph.startblock.operations[0].opname == 'generator_mark' - graph.startblock.operations.pop(0) - # insert_empty_startblock(None, graph) _insert_reads(graph.startblock, Entry.varnames) Entry.block = graph.startblock diff --git a/rpython/flowspace/pygraph.py b/rpython/flowspace/pygraph.py --- a/rpython/flowspace/pygraph.py +++ b/rpython/flowspace/pygraph.py @@ -1,8 +1,7 @@ """ Implements flow graphs for Python callables """ -from rpython.flowspace.model import (FunctionGraph, Constant, Variable, - SpaceOperation) +from rpython.flowspace.model import FunctionGraph, Constant, Variable from rpython.flowspace.framestate import FrameState class PyGraph(FunctionGraph): @@ -17,10 +16,6 @@ data[i] = Variable() state = FrameState(data + [Constant(None), Constant(None)], [], 0) initialblock = SpamBlock(state) - if code.is_generator: - initialblock.operations.append( - SpaceOperation('generator_mark', [], Variable())) - super(PyGraph, self).__init__(self._sanitize_funcname(func), 
initialblock) self.func = func self.signature = code.signature From noreply at buildbot.pypy.org Sat Nov 23 13:24:16 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 23 Nov 2013 13:24:16 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: Don't use raw SpaceOperations in split_block() Message-ID: <20131123122416.4E7651C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68286:de329cbe402e Date: 2013-11-23 12:49 +0100 http://bitbucket.org/pypy/pypy/changeset/de329cbe402e/ Log: Don't use raw SpaceOperations in split_block() diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -63,7 +63,7 @@ #but only for variables that are produced in the old block and needed in #the new one varmap = {} - vars_produced_in_new_block = {} + vars_produced_in_new_block = set() def get_new_name(var): if var is None: return None @@ -77,11 +77,10 @@ moved_operations = block.operations[index:] new_moved_ops = [] for op in moved_operations: - newop = SpaceOperation(op.opname, - [get_new_name(arg) for arg in op.args], - op.result) + repl = dict((arg, get_new_name(arg)) for arg in op.args) + newop = op.replace(repl) new_moved_ops.append(newop) - vars_produced_in_new_block[op.result] = True + vars_produced_in_new_block.add(op.result) moved_operations = new_moved_ops links = block.exits block.exits = None From noreply at buildbot.pypy.org Sat Nov 23 13:24:17 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 23 Nov 2013 13:24:17 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: avoid raw SpaceOperation Message-ID: <20131123122417.A22C31C154F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68287:fe79ac378ab2 Date: 2013-11-23 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/fe79ac378ab2/ Log: avoid raw SpaceOperation diff --git a/rpython/rtyper/test/test_llinterp.py 
b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -336,14 +336,15 @@ def test_funny_links(): from rpython.flowspace.model import Block, FunctionGraph, \ - SpaceOperation, Variable, Constant, Link + Variable, Constant, Link + from rpython.flowspace.operation import op for i in range(2): v_i = Variable("i") - v_case = Variable("case") block = Block([v_i]) g = FunctionGraph("is_one", block) - block.operations.append(SpaceOperation("eq", [v_i, Constant(1)], v_case)) - block.exitswitch = v_case + op1 = op.eq(v_i, Constant(1)) + block.operations.append(op1) + block.exitswitch = op1.result tlink = Link([Constant(1)], g.returnblock, True) flink = Link([Constant(0)], g.returnblock, False) links = [tlink, flink] From noreply at buildbot.pypy.org Sat Nov 23 13:44:05 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 23 Nov 2013 13:44:05 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: move consider() from SpaceOperation to HLOperaation; add op.hint Message-ID: <20131123124405.BC9A51C00B9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68288:b1788bf86873 Date: 2013-11-23 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/b1788bf86873/ Log: move consider() from SpaceOperation to HLOperaation; add op.hint NB: The HLOperation are now precisely those that are handled by the annotator. 
diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -439,12 +439,6 @@ newresult = mapping.get(self.result, self.result) return type(self)(self.opname, newargs, newresult, self.offset) - def consider(self, annotator, *argcells): - consider_meth = getattr(annotator, 'consider_op_' + self.opname, None) - if not consider_meth: - raise Exception("unknown op: %r" % op) - return consider_meth(*argcells) - class Atom(object): def __init__(self, name): self.__name__ = name # make save_global happy diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -83,6 +83,12 @@ def constfold(self): return None + def consider(self, annotator, *argcells): + consider_meth = getattr(annotator, 'consider_op_' + self.opname, None) + if not consider_meth: + raise Exception("unknown op: %r" % op) + return consider_meth(*argcells) + class PureOperation(HLOperation): pure = True @@ -333,6 +339,7 @@ add_operator('newtuple', None, pure=True, pyfunc=lambda *args:args) add_operator('newlist', None) add_operator('newslice', 3) +add_operator('hint', None) class Pow(PureOperation): diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -6,7 +6,7 @@ """ import py -from rpython.flowspace.model import (SpaceOperation, Variable, Constant, +from rpython.flowspace.model import (Variable, Constant, c_last_exception, checkgraph, mkentrymap) from rpython.flowspace.operation import OverflowingOperation, op from rpython.rlib import rarithmetic @@ -814,10 +814,10 @@ def run(self, vlist, vmeth, appendblock): # first check that the 'append' method object doesn't escape - for op in appendblock.operations: - if op.opname == 'simple_call' and op.args[0] is vmeth: + for hlop in appendblock.operations: + if hlop.opname == 'simple_call' and 
hlop.args[0] is vmeth: pass - elif vmeth in op.args: + elif vmeth in hlop.args: raise DetectorFailed # used in another operation for link in appendblock.exits: if vmeth in link.args: @@ -922,20 +922,19 @@ link = iterblock.exits[0] vlist = self.contains_vlist(link.args) assert vlist - for op in iterblock.operations: - res = self.variable_families.find_rep(op.result) + for hlop in iterblock.operations: + res = self.variable_families.find_rep(hlop.result) if res is viterfamily: break else: raise AssertionError("lost 'iter' operation") - vlist2 = Variable(vlist) chint = Constant({'maxlength': True}) - iterblock.operations += [ - SpaceOperation('hint', [vlist, op.args[0], chint], vlist2)] + hint = op.hint(vlist, hlop.args[0], chint) + iterblock.operations.append(hint) link.args = list(link.args) for i in range(len(link.args)): if link.args[i] is vlist: - link.args[i] = vlist2 + link.args[i] = hint.result # - wherever the list exits the loop body, add a 'hint({fence})' for block in loopbody: @@ -954,8 +953,9 @@ vlist2 = newblock.inputargs[index] vlist3 = Variable(vlist2) newblock.inputargs[index] = vlist3 - newblock.operations.append( - SpaceOperation('hint', [vlist3, chints], vlist2)) + hint = op.hint(vlist3, chints) + hint.result = vlist2 + newblock.operations.append(hint) # done! 
From noreply at buildbot.pypy.org Sat Nov 23 14:04:36 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 23 Nov 2013 14:04:36 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: hg merge default Message-ID: <20131123130436.5E50A1C01F7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68289:001ff2a06b0c Date: 2013-11-23 14:03 +0100 http://bitbucket.org/pypy/pypy/changeset/001ff2a06b0c/ Log: hg merge default diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,6 +10,8 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality -.. branch windows-packaging +.. branch: windows-packaging Package tk/tcl runtime with win32 +.. 
branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,20 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + skip('fixme') + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -151,6 +151,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -447,6 +455,7 @@ fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -93,7 +93,11 @@ def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) - 
def descr_tostring(self, space): + def descr_tostring(self, space, w_order=None): + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) return space.wrap(loop.tostring(space, self)) def getitem_filter(self, space, arr): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -832,6 +832,17 @@ assert x.dtype == int8 assert (x == array(42)).all() + def test_descr(self): + import numpy as np + assert np.dtype('i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' + a = array([[1, 2], [3, 4]], dtype='i1') + for order in (None, False, 'C', 'K', 'a'): + assert a.tostring(order) == '\x01\x02\x03\x04' + import sys + for order in (True, 'F'): + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.tostring, order) + else: + assert a.tostring(order) == '\x01\x03\x02\x04' class AppTestRepr(BaseNumpyAppTest): @@ -3027,7 +3040,8 @@ from numpypy import dtype, array, zeros d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() @@ -3037,7 +3051,8 @@ assert (a[1][v] == [5.5, 6.5, 7.5, 8.5, 9.5]).all() for v in [-3, 2]: exc = raises(IndexError, "a[0][%d]" % v) - assert exc.value.message == "invalid index (%d)" % (v + 2 if v < 0 else v) + assert exc.value.message == "invalid index (%d)" % \ + (v + 2 if v < 0 else v) exc = raises(IndexError, "a[0]['z']") assert exc.value.message == "invalid index" exc = raises(IndexError, "a[0][None]") @@ -3107,7 +3122,8 @@ from numpypy import dtype, array d 
= dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -371,15 +371,19 @@ listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: + if tp is SomeOrderedDict.knowntype: + cls = SomeOrderedDict + else: + cls = SomeDict if need_const: key = Constant(x) try: return self.immutable_cache[key] except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) @@ -412,10 +416,7 @@ dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is SomeOrderedDict.knowntype: - result = SomeOrderedDict(dictdef) - else: - result = SomeDict(dictdef) + result = cls(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4144,6 +4144,19 @@ a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) + def test_prebuilt_ordered_dict(self): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please upgrade to python 2.7") + d = OrderedDict([("aa", 1)]) + + def f(): + return d + + a = self.RPythonAnnotator() + assert 
isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -460,13 +460,13 @@ check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(self, chr): + def method_strip(self, chr=None): return self.basestringclass(no_nul=self.no_nul) - def method_lstrip(self, chr): + def method_lstrip(self, chr=None): return self.basestringclass(no_nul=self.no_nul) - def method_rstrip(self, chr): + def method_rstrip(self, chr=None): return self.basestringclass(no_nul=self.no_nul) def method_join(self, s_list): diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): non_float_locs = [] non_float_regs = [] float_locs = [] float_regs = [] stack_args = [] + singlefloats = None arglocs = self.arglocs argtypes = self.argtypes count = 0 # stack alignment counter on_stack = 0 - for arg in arglocs: - if arg.type != FLOAT: + for i in range(len(arglocs)): + 
argtype = INT + if i < len(argtypes) and argtypes[i] == 'S': + argtype = argtypes[i] + arg = arglocs[i] + if arg.is_float(): + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + else: # float argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': + # Singlefloat argument + if singlefloats is None: + singlefloats = [] + tgt = self.get_next_vfp(argtype) + if tgt: + singlefloats.append((arg, tgt)) + else: # Singlefloat argument that needs to go on the stack + # treated the same as a regular core register argument + count += 1 + on_stack += 1 + stack_args.append(arg) + else: if len(non_float_regs) < len(r.argument_regs): reg = r.argument_regs[len(non_float_regs)] non_float_locs.append(arg) @@ -249,18 +310,6 @@ count += 1 on_stack += 1 stack_args.append(arg) - else: - if len(float_regs) < len(r.vfp_argument_regs): - reg = r.vfp_argument_regs[len(float_regs)] - float_locs.append(arg) - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 # align the stack if count % 2 != 0: stack_args.append(None) @@ -275,13 +324,28 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers + remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) + if singlefloats: + for src, dest in singlefloats: + if src.is_float(): + assert 0, 'unsupported case' + if src.is_stack(): + # use special VLDR for 32bit + self.asm.regalloc_mov(src, r.ip) + src = r.ip + if src.is_imm(): + self.mc.gen_load_int(r.ip.value, src.value) + src = r.ip + if src.is_core_reg(): + self.mc.VMOV_cs(dest.value, src.value) # remap values stored in 
core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) - # remap values stored in vfp registers - remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) def load_result(self): resloc = self.resloc + if self.restype == 'S': + self.mc.VMOV_sc(resloc.value, r.s0.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -178,6 +178,30 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_sc(self, dest, src): + """move a single precision vfp register[src] to a core reg[dest]""" + self._VMOV_32bit(src, dest, to_arm_register=1) + + def VMOV_cs(self, dest, src): + """move a core register[src] to a single precision vfp + register[dest]""" + self._VMOV_32bit(dest, src, to_arm_register=0) + + def _VMOV_32bit(self, float_reg, core_reg, to_arm_register, cond=cond.AL): + """This instruction transfers the contents of a single-precision VFP + register to an ARM core register, or the contents of an ARM core + register to a single-precision VFP register. 
+ """ + instr = (cond << 28 + | 0xE << 24 + | to_arm_register << 20 + | ((float_reg >> 1) & 0xF) << 16 + | core_reg << 12 + | 0xA << 8 + | (float_reg & 0x1) << 7 + | 1 << 4) + self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): sz = 1 # for 64-bit mode instr = (cond << 28 @@ -198,8 +222,16 @@ self._VCVT(target, source, cond, 0, 1) def _VCVT(self, target, source, cond, opc2, sz): - D = 0 - M = 0 + # A8.6.295 + to_integer = (opc2 >> 2) & 1 + if to_integer: + D = target & 1 + target >>= 1 + M = (source >> 4) & 1 + else: + M = source & 1 + source >>= 1 + D = (target >> 4) & 1 op = 1 instr = (cond << 28 | 0xEB8 << 16 @@ -216,8 +248,8 @@ def _VCVT_single_double(self, target, source, cond, sz): # double_to_single = (sz == '1'); - D = 0 - M = 0 + D = target & 1 if sz else (target >> 4) & 1 + M = (source >> 4) & 1 if sz else source & 1 instr = (cond << 28 | 0xEB7 << 16 | 0xAC << 4 diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -55,12 +55,8 @@ type = FLOAT width = 2 * WORD - def get_single_precision_regs(self): - return [VFPRegisterLocation(i) for i in - [self.value * 2, self.value * 2 + 1]] - def __repr__(self): - return 'vfp%d' % self.value + return 'vfp(d%d)' % self.value def is_core_reg(self): return False @@ -74,6 +70,14 @@ def is_float(self): return True +class SVFPRegisterLocation(VFPRegisterLocation): + """Single Precission VFP Register""" + _immutable_ = True + width = WORD + type = 'S' + + def __repr__(self): + return 'vfp(s%d)' % self.value class ImmLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1102,17 +1102,16 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) - 
self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_float_to_int(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_int_to_float(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond emit_op_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') @@ -1147,15 +1146,14 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_f64_f32(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_f32_f64(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -1,8 +1,10 @@ from rpython.jit.backend.arm.locations import VFPRegisterLocation +from rpython.jit.backend.arm.locations import SVFPRegisterLocation from rpython.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] +svfpregisters = [SVFPRegisterLocation(i) for i in range(32)] [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15] = registers @@ -10,6 +12,10 @@ [d0, d1, d2, d3, d4, d5, 
d6, d7, d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters +# single precission VFP registers, 32-bit +for i in range(32): + globals()['s%d' % i] = svfpregisters[i] + # aliases for registers fp = r11 ip = r12 @@ -17,6 +23,7 @@ lr = r14 pc = r15 vfp_ip = d15 +svfp_ip = s31 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] @@ -27,6 +34,7 @@ callee_restored_registers = callee_resp + [pc] vfp_argument_regs = caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] +svfp_argument_regs = [globals()['s%i' % i] for i in range(16)] callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -22,7 +22,7 @@ supports_floats = True supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode - supports_singlefloats = not detect_hardfloat() + supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,7 +4,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -80,38 +79,6 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY - _t_opened = {} - - def t_dlopen(name): - # for direct execution: can't use the regular way on FreeBSD :-( - # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html - import ctypes - if name: - name = rffi.charp2str(name) - else: - name = None - try: - res = 
ctypes.cdll.LoadLibrary(name) - except OSError, e: - raise DLOpenError(str(e)) - h = rffi.cast(rffi.VOIDP, res._handle) - _t_opened[rffi.cast(rffi.LONG, h)] = res - return h - - def t_dlclose(handle): - _t_opened.pop(rffi.cast(rffi.LONG, handle)) - return rffi.cast(rffi.INT, 0) - - def t_dldym(handle, name): - import ctypes - lib = _t_opened[rffi.cast(rffi.LONG, handle)] - try: - symbol = lib[name] - except AttributeError: - raise KeyError(name) - res = ctypes.cast(symbol, ctypes.c_void_p) - return rffi.cast(rffi.VOIDP, res.value or 0) - def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -124,8 +91,6 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ - if not we_are_translated(): - return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -139,16 +104,11 @@ raise DLOpenError(err) return res - def dlclose(handle): - if not we_are_translated(): - return t_dlclose(handle) - return c_dlclose(handle) + dlclose = c_dlclose def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ - if not we_are_translated(): - return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,4 +21,3 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) - dlclose(lib) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, 
Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) @@ -402,6 +403,46 @@ return result @jit.elidable + def ll_strip_default(s, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and s.chars[lpos].isspace(): + lpos += 1 + if right: + while lpos < rpos + 1 and s.chars[rpos].isspace(): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable + def ll_strip_multiple(s, s2, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and LLHelpers.ll_contains(s2, s.chars[lpos]): + lpos += 1 + if right: + while lpos < rpos + 1 and LLHelpers.ll_contains(s2, s.chars[rpos]): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable def ll_upper(s): s_chars = s.chars s_len = len(s_chars) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -231,11 +231,22 @@ def rtype_method_strip(self, hop, left=True, right=True): rstr = hop.args_r[0].repr v_str = hop.inputarg(rstr.repr, arg=0) - v_char = hop.inputarg(rstr.char_repr, arg=1) - v_left = hop.inputconst(Bool, left) - v_right = hop.inputconst(Bool, right) + args_v = [v_str] + if len(hop.args_s) == 2: + if isinstance(hop.args_s[1], annmodel.SomeString): + v_stripstr = hop.inputarg(rstr.repr, arg=1) + args_v.append(v_stripstr) + func = self.ll.ll_strip_multiple + else: + v_char = hop.inputarg(rstr.char_repr, arg=1) + args_v.append(v_char) + func = self.ll.ll_strip + else: + func = self.ll.ll_strip_default + args_v.append(hop.inputconst(Bool, left)) + args_v.append(hop.inputconst(Bool, right)) 
hop.exception_is_here() - return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) + return hop.gendirectcall(func, *args_v) def rtype_method_lstrip(self, hop): return self.rtype_method_strip(hop, left=True, right=False) diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -9,6 +9,7 @@ from rpython.rtyper.rstr import AbstractLLHelpers from rpython.rtyper.rtyper import TyperError from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import llstr, hlstr def test_parse_fmt(): @@ -457,6 +458,29 @@ res = self.interpret(left2, []) assert self.ll_to_string(res) == const('a') + def test_strip_multiple_chars(self): + const = self.const + def both(): + return const('!ab!').strip(const('!a')) + def left(): + return const('!+ab!').lstrip(const('!+')) + def right(): + return const('!ab!+').rstrip(const('!+')) + def empty(): + return const(' \t\t ').strip('\t ') + def left2(): + return const('a ').strip(' \t') + res = self.interpret(both, []) + assert self.ll_to_string(res) == const('b') + res = self.interpret(left, []) + assert self.ll_to_string(res) == const('ab!') + res = self.interpret(right, []) + assert self.ll_to_string(res) == const('!ab') + res = self.interpret(empty, []) + assert self.ll_to_string(res) == const('') + res = self.interpret(left2, []) + assert self.ll_to_string(res) == const('a') + def test_upper(self): const = self.const constchar = self.constchar @@ -1143,3 +1167,16 @@ self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) lltype.free(array, flavor='raw') + + def test_strip_no_arg(self): + strings = [" xyz ", "", "\t\vx"] + + def f(i): + return strings[i].strip() + + res = self.interpret(f, [0]) + assert hlstr(res) == "xyz" + res = self.interpret(f, [1]) + assert hlstr(res) == "" + res = self.interpret(f, [2]) + assert hlstr(res) == "x" diff --git a/rpython/tool/runsubprocess.py 
b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. '.pyc' diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -574,6 +574,22 @@ fn = compile(chooser, [bool]) assert fn(True) +def test_ordered_dict(): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please update to Python 2.7") + + expected = [('ea', 1), ('bb', 2), ('c', 3), ('d', 4), ('e', 5), + ('ef', 6)] + d = OrderedDict(expected) + + def f(): + assert d.items() == expected + + fn = compile(f, []) + fn() + def test_inhibit_tail_call(): def foobar_fn(n): return 42 From noreply at buildbot.pypy.org Sat Nov 23 16:10:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 23 Nov 2013 16:10:12 +0100 (CET) Subject: [pypy-commit] cffi default: Include win64.obj in the source archive Message-ID: <20131123151012.6B3831C0206@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1424:6430e95da26e Date: 2013-11-23 16:10 +0100 http://bitbucket.org/cffi/cffi/changeset/6430e95da26e/ Log: Include win64.obj in the source archive diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ recursive-include cffi *.py -recursive-include c *.c *.h *.asm *.py +recursive-include c *.c *.h *.asm *.py win64.obj recursive-include testing *.py recursive-include doc *.py *.rst Makefile *.bat recursive-include demo py.cleanup *.py From noreply at buildbot.pypy.org Sat Nov 23 16:21:56 2013 From: noreply at 
buildbot.pypy.org (arigo) Date: Sat, 23 Nov 2013 16:21:56 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for issue #1646 Message-ID: <20131123152156.E45611C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68290:eddf9beed68a Date: 2013-11-23 16:21 +0100 http://bitbucket.org/pypy/pypy/changeset/eddf9beed68a/ Log: Fix for issue #1646 diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + 
raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools From noreply at buildbot.pypy.org Sat Nov 23 20:05:24 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 23 Nov 2013 20:05:24 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: document branch Message-ID: <20131123190524.73DD51C025A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68293:7eeca50ae4ec Date: 2013-11-23 21:01 +0200 http://bitbucket.org/pypy/pypy/changeset/7eeca50ae4ec/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,6 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. 
branch: voidtype_strformat +Better support for record numpy arrays From noreply at buildbot.pypy.org Sat Nov 23 20:05:23 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 23 Nov 2013 20:05:23 +0100 (CET) Subject: [pypy-commit] pypy default: fix tests for changeset 5ed628789a2c Message-ID: <20131123190523.505F91C0225@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68292:287eb01c11f4 Date: 2013-11-23 20:05 +0200 http://bitbucket.org/pypy/pypy/changeset/287eb01c11f4/ Log: fix tests for changeset 5ed628789a2c diff --git a/rpython/jit/backend/x86/test/test_ztranslation_basic.py b/rpython/jit/backend/x86/test/test_ztranslation_basic.py --- a/rpython/jit/backend/x86/test/test_ztranslation_basic.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_basic.py @@ -1,11 +1,11 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationX86(TranslationTest): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass - # needs to be changed to CPU386_NO_SSE2, but well. 
- if WORD == 4: + # msse2 and sse are always on on x86-64 + if WORD == 4 and sys.platform != 'win32': assert '-msse2' in cbuilder.eci.compile_extra assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py --- a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py @@ -1,11 +1,13 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC - +from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationCallAssemblerX86(TranslationTestCallAssembler): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass + #We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. 
- assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra \ No newline at end of file + if WORD == 4 and sys.platform != 'win32': + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py b/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py --- a/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py @@ -1,11 +1,14 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestJITStats from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC +from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationJITStatsX86(TranslationTestJITStats): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass + #We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. 
- assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra \ No newline at end of file + if WORD == 4 and sys.platform != 'win32': + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra From noreply at buildbot.pypy.org Sat Nov 23 20:05:22 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 23 Nov 2013 20:05:22 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: merge with default Message-ID: <20131123190522.1437F1C0206@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68291:abf0729cab66 Date: 2013-11-23 19:16 +0200 http://bitbucket.org/pypy/pypy/changeset/abf0729cab66/ Log: merge with default diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,6 +10,8 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality -.. branch windows-packaging +.. branch: windows-packaging Package tk/tcl runtime with win32 +.. 
branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,20 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + skip('fixme') + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - 
if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -211,7 +211,15 @@ "field named %s not found" % idx)) return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): + space.isinstance_w(w_idx, space.w_slice)): + return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) + elif isinstance(w_idx, W_NDimArray) and \ + isinstance(w_idx.implementation, scalar.Scalar): + w_idx = w_idx.get_scalar_value().item(space) + if not space.isinstance_w(w_idx, space.w_int) and \ + not space.isinstance_w(w_idx, space.w_bool): + raise OperationError(space.w_IndexError, space.wrap( + "arrays used as indices must be of integer (or boolean) type")) 
return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -151,6 +151,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -447,6 +455,7 @@ fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -93,7 +93,11 @@ def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) - def descr_tostring(self, space): + def descr_tostring(self, space, w_order=None): + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) return space.wrap(loop.tostring(space, self)) def getitem_filter(self, space, arr): @@ -198,7 +202,8 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: return 
self.implementation.descr_getitem(space, self, w_idx) @@ -212,7 +217,8 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -832,6 +832,17 @@ assert x.dtype == int8 assert (x == array(42)).all() + def test_descr(self): + import numpy as np + assert np.dtype('i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' + a = array([[1, 2], [3, 4]], dtype='i1') + for order in (None, False, 'C', 'K', 'a'): + assert a.tostring(order) == '\x01\x02\x03\x04' + import sys + for order in (True, 'F'): + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.tostring, order) + else: + assert a.tostring(order) == '\x01\x03\x02\x04' class AppTestRepr(BaseNumpyAppTest): @@ -3015,7 +3040,8 @@ from numpypy import dtype, array, zeros d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() @@ -3025,7 +3051,8 @@ assert (a[1][v] == [5.5, 6.5, 7.5, 8.5, 9.5]).all() for v in [-3, 2]: exc = raises(IndexError, "a[0][%d]" % v) - assert exc.value.message == "invalid index (%d)" % (v + 2 if v < 0 else v) + assert exc.value.message == "invalid index (%d)" % \ + (v + 2 if v < 0 else v) exc = raises(IndexError, "a[0]['z']") assert exc.value.message == "invalid 
index" exc = raises(IndexError, "a[0][None]") @@ -3097,7 +3124,8 @@ from numpypy import dtype, array d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -371,15 +371,19 @@ listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: + if tp is SomeOrderedDict.knowntype: + cls = SomeOrderedDict + else: + cls = SomeDict if need_const: key = Constant(x) try: return self.immutable_cache[key] except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) @@ -412,10 +416,7 @@ dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is SomeOrderedDict.knowntype: - result = SomeOrderedDict(dictdef) - else: - result = SomeDict(dictdef) + result = cls(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4148,6 +4148,19 @@ a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) + def test_prebuilt_ordered_dict(self): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please upgrade to python 2.7") + d = 
OrderedDict([("aa", 1)]) + + def f(): + return d + + a = self.RPythonAnnotator() + assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -460,13 +460,13 @@ check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr): + def method_strip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_lstrip(str, chr): + def method_lstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_rstrip(str, chr): + def method_rstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) def method_join(str, s_list): diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): non_float_locs = [] non_float_regs = [] float_locs = [] float_regs = [] stack_args = [] + singlefloats = None arglocs = self.arglocs argtypes = self.argtypes count = 0 # stack alignment counter on_stack = 0 - for arg 
in arglocs: - if arg.type != FLOAT: + for i in range(len(arglocs)): + argtype = INT + if i < len(argtypes) and argtypes[i] == 'S': + argtype = argtypes[i] + arg = arglocs[i] + if arg.is_float(): + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + else: # float argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': + # Singlefloat argument + if singlefloats is None: + singlefloats = [] + tgt = self.get_next_vfp(argtype) + if tgt: + singlefloats.append((arg, tgt)) + else: # Singlefloat argument that needs to go on the stack + # treated the same as a regular core register argument + count += 1 + on_stack += 1 + stack_args.append(arg) + else: if len(non_float_regs) < len(r.argument_regs): reg = r.argument_regs[len(non_float_regs)] non_float_locs.append(arg) @@ -249,18 +310,6 @@ count += 1 on_stack += 1 stack_args.append(arg) - else: - if len(float_regs) < len(r.vfp_argument_regs): - reg = r.vfp_argument_regs[len(float_regs)] - float_locs.append(arg) - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 # align the stack if count % 2 != 0: stack_args.append(None) @@ -275,13 +324,28 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers + remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) + if singlefloats: + for src, dest in singlefloats: + if src.is_float(): + assert 0, 'unsupported case' + if src.is_stack(): + # use special VLDR for 32bit + self.asm.regalloc_mov(src, r.ip) + src = r.ip + if src.is_imm(): + self.mc.gen_load_int(r.ip.value, src.value) + src = r.ip + if src.is_core_reg(): 
+ self.mc.VMOV_cs(dest.value, src.value) # remap values stored in core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) - # remap values stored in vfp registers - remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) def load_result(self): resloc = self.resloc + if self.restype == 'S': + self.mc.VMOV_sc(resloc.value, r.s0.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -178,6 +178,30 @@ | (dm & 0xF)) self.write32(instr) + def VMOV_sc(self, dest, src): + """move a single precision vfp register[src] to a core reg[dest]""" + self._VMOV_32bit(src, dest, to_arm_register=1) + + def VMOV_cs(self, dest, src): + """move a core register[src] to a single precision vfp + register[dest]""" + self._VMOV_32bit(dest, src, to_arm_register=0) + + def _VMOV_32bit(self, float_reg, core_reg, to_arm_register, cond=cond.AL): + """This instruction transfers the contents of a single-precision VFP + register to an ARM core register, or the contents of an ARM core + register to a single-precision VFP register. 
+ """ + instr = (cond << 28 + | 0xE << 24 + | to_arm_register << 20 + | ((float_reg >> 1) & 0xF) << 16 + | core_reg << 12 + | 0xA << 8 + | (float_reg & 0x1) << 7 + | 1 << 4) + self.write32(instr) + def VMOV_cc(self, dd, dm, cond=cond.AL): sz = 1 # for 64-bit mode instr = (cond << 28 @@ -198,8 +222,16 @@ self._VCVT(target, source, cond, 0, 1) def _VCVT(self, target, source, cond, opc2, sz): - D = 0 - M = 0 + # A8.6.295 + to_integer = (opc2 >> 2) & 1 + if to_integer: + D = target & 1 + target >>= 1 + M = (source >> 4) & 1 + else: + M = source & 1 + source >>= 1 + D = (target >> 4) & 1 op = 1 instr = (cond << 28 | 0xEB8 << 16 @@ -216,8 +248,8 @@ def _VCVT_single_double(self, target, source, cond, sz): # double_to_single = (sz == '1'); - D = 0 - M = 0 + D = target & 1 if sz else (target >> 4) & 1 + M = (source >> 4) & 1 if sz else source & 1 instr = (cond << 28 | 0xEB7 << 16 | 0xAC << 4 diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -55,12 +55,8 @@ type = FLOAT width = 2 * WORD - def get_single_precision_regs(self): - return [VFPRegisterLocation(i) for i in - [self.value * 2, self.value * 2 + 1]] - def __repr__(self): - return 'vfp%d' % self.value + return 'vfp(d%d)' % self.value def is_core_reg(self): return False @@ -74,6 +70,14 @@ def is_float(self): return True +class SVFPRegisterLocation(VFPRegisterLocation): + """Single Precission VFP Register""" + _immutable_ = True + width = WORD + type = 'S' + + def __repr__(self): + return 'vfp(s%d)' % self.value class ImmLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1102,17 +1102,16 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) - 
self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_float_to_int(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_int_to_float(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond emit_op_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') @@ -1147,15 +1146,14 @@ arg, res = arglocs assert arg.is_vfp_reg() assert res.is_core_reg() - self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) - self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) + self.mc.VCVT_f64_f32(r.svfp_ip.value, arg.value) + self.mc.VMOV_sc(res.value, r.svfp_ip.value) return fcond def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() assert arg.is_core_reg() - self.mc.MOV_ri(r.ip.value, 0) - self.mc.VMOV_cr(res.value, arg.value, r.ip.value) - self.mc.VCVT_f32_f64(res.value, res.value) + self.mc.VMOV_cs(r.svfp_ip.value, arg.value) + self.mc.VCVT_f32_f64(res.value, r.svfp_ip.value) return fcond diff --git a/rpython/jit/backend/arm/registers.py b/rpython/jit/backend/arm/registers.py --- a/rpython/jit/backend/arm/registers.py +++ b/rpython/jit/backend/arm/registers.py @@ -1,8 +1,10 @@ from rpython.jit.backend.arm.locations import VFPRegisterLocation +from rpython.jit.backend.arm.locations import SVFPRegisterLocation from rpython.jit.backend.arm.locations import RegisterLocation registers = [RegisterLocation(i) for i in range(16)] vfpregisters = [VFPRegisterLocation(i) for i in range(16)] +svfpregisters = [SVFPRegisterLocation(i) for i in range(32)] [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15] = registers @@ -10,6 +12,10 @@ [d0, d1, d2, d3, d4, d5, 
d6, d7, d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters +# single precission VFP registers, 32-bit +for i in range(32): + globals()['s%d' % i] = svfpregisters[i] + # aliases for registers fp = r11 ip = r12 @@ -17,6 +23,7 @@ lr = r14 pc = r15 vfp_ip = d15 +svfp_ip = s31 all_regs = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10] all_vfp_regs = vfpregisters[:-1] @@ -27,6 +34,7 @@ callee_restored_registers = callee_resp + [pc] vfp_argument_regs = caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7] +svfp_argument_regs = [globals()['s%i' % i] for i in range(16)] callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15] callee_saved_vfp_registers = callee_vfp_resp diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -22,7 +22,7 @@ supports_floats = True supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode - supports_singlefloats = not detect_hardfloat() + supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,7 +4,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -80,38 +79,6 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY - _t_opened = {} - - def t_dlopen(name): - # for direct execution: can't use the regular way on FreeBSD :-( - # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html - import ctypes - if name: - name = rffi.charp2str(name) - else: - name = None - try: - res = 
ctypes.cdll.LoadLibrary(name) - except OSError, e: - raise DLOpenError(str(e)) - h = rffi.cast(rffi.VOIDP, res._handle) - _t_opened[rffi.cast(rffi.LONG, h)] = res - return h - - def t_dlclose(handle): - _t_opened.pop(rffi.cast(rffi.LONG, handle)) - return rffi.cast(rffi.INT, 0) - - def t_dldym(handle, name): - import ctypes - lib = _t_opened[rffi.cast(rffi.LONG, handle)] - try: - symbol = lib[name] - except AttributeError: - raise KeyError(name) - res = ctypes.cast(symbol, ctypes.c_void_p) - return rffi.cast(rffi.VOIDP, res.value or 0) - def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -124,8 +91,6 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ - if not we_are_translated(): - return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -139,16 +104,11 @@ raise DLOpenError(err) return res - def dlclose(handle): - if not we_are_translated(): - return t_dlclose(handle) - return c_dlclose(handle) + dlclose = c_dlclose def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ - if not we_are_translated(): - return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,4 +21,3 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) - dlclose(lib) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, 
Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) @@ -402,6 +403,46 @@ return result @jit.elidable + def ll_strip_default(s, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and s.chars[lpos].isspace(): + lpos += 1 + if right: + while lpos < rpos + 1 and s.chars[rpos].isspace(): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable + def ll_strip_multiple(s, s2, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and LLHelpers.ll_contains(s2, s.chars[lpos]): + lpos += 1 + if right: + while lpos < rpos + 1 and LLHelpers.ll_contains(s2, s.chars[rpos]): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable def ll_upper(s): s_chars = s.chars s_len = len(s_chars) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -231,11 +231,22 @@ def rtype_method_strip(self, hop, left=True, right=True): rstr = hop.args_r[0].repr v_str = hop.inputarg(rstr.repr, arg=0) - v_char = hop.inputarg(rstr.char_repr, arg=1) - v_left = hop.inputconst(Bool, left) - v_right = hop.inputconst(Bool, right) + args_v = [v_str] + if len(hop.args_s) == 2: + if isinstance(hop.args_s[1], annmodel.SomeString): + v_stripstr = hop.inputarg(rstr.repr, arg=1) + args_v.append(v_stripstr) + func = self.ll.ll_strip_multiple + else: + v_char = hop.inputarg(rstr.char_repr, arg=1) + args_v.append(v_char) + func = self.ll.ll_strip + else: + func = self.ll.ll_strip_default + args_v.append(hop.inputconst(Bool, left)) + args_v.append(hop.inputconst(Bool, right)) 
hop.exception_is_here() - return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) + return hop.gendirectcall(func, *args_v) def rtype_method_lstrip(self, hop): return self.rtype_method_strip(hop, left=True, right=False) diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -9,6 +9,7 @@ from rpython.rtyper.rstr import AbstractLLHelpers from rpython.rtyper.rtyper import TyperError from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import llstr, hlstr def test_parse_fmt(): @@ -457,6 +458,29 @@ res = self.interpret(left2, []) assert self.ll_to_string(res) == const('a') + def test_strip_multiple_chars(self): + const = self.const + def both(): + return const('!ab!').strip(const('!a')) + def left(): + return const('!+ab!').lstrip(const('!+')) + def right(): + return const('!ab!+').rstrip(const('!+')) + def empty(): + return const(' \t\t ').strip('\t ') + def left2(): + return const('a ').strip(' \t') + res = self.interpret(both, []) + assert self.ll_to_string(res) == const('b') + res = self.interpret(left, []) + assert self.ll_to_string(res) == const('ab!') + res = self.interpret(right, []) + assert self.ll_to_string(res) == const('!ab') + res = self.interpret(empty, []) + assert self.ll_to_string(res) == const('') + res = self.interpret(left2, []) + assert self.ll_to_string(res) == const('a') + def test_upper(self): const = self.const constchar = self.constchar @@ -1143,3 +1167,16 @@ self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) lltype.free(array, flavor='raw') + + def test_strip_no_arg(self): + strings = [" xyz ", "", "\t\vx"] + + def f(i): + return strings[i].strip() + + res = self.interpret(f, [0]) + assert hlstr(res) == "xyz" + res = self.interpret(f, [1]) + assert hlstr(res) == "" + res = self.interpret(f, [2]) + assert hlstr(res) == "x" diff --git a/rpython/tool/runsubprocess.py 
b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. '.pyc' diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -574,6 +574,22 @@ fn = compile(chooser, [bool]) assert fn(True) +def test_ordered_dict(): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please update to Python 2.7") + + expected = [('ea', 1), ('bb', 2), ('c', 3), ('d', 4), ('e', 5), + ('ef', 6)] + d = OrderedDict(expected) + + def f(): + assert d.items() == expected + + fn = compile(f, []) + fn() + def test_inhibit_tail_call(): def foobar_fn(n): return 42 From noreply at buildbot.pypy.org Sat Nov 23 20:34:25 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 23 Nov 2013 20:34:25 +0100 (CET) Subject: [pypy-commit] pypy voidtype_strformat: close branch to be merged Message-ID: <20131123193425.19FA11C01F7@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: voidtype_strformat Changeset: r68294:074fa1db3ea6 Date: 2013-11-23 21:32 +0200 http://bitbucket.org/pypy/pypy/changeset/074fa1db3ea6/ Log: close branch to be merged From noreply at buildbot.pypy.org Sat Nov 23 20:34:26 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 23 Nov 2013 20:34:26 +0100 (CET) Subject: [pypy-commit] pypy default: merge voidtype_strformat, which improves str formatting for record ndarrays Message-ID: <20131123193426.883CE1C01F7@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68295:f8f583791850 Date: 
2013-11-23 21:33 +0200 http://bitbucket.org/pypy/pypy/changeset/f8f583791850/ Log: merge voidtype_strformat, which improves str formatting for record ndarrays diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,6 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -249,12 +249,13 @@ return space.wrap(self.dump_data()) return space.call_function(cache.w_array_str, self) - def dump_data(self): + def dump_data(self, prefix='array(', suffix=')'): i = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() - s.append('array([') + s.append(prefix) + s.append('[') while not i.done(): if first: first = False @@ -262,7 +263,8 @@ s.append(', ') s.append(dtype.itemtype.str_format(i.getitem())) i.next() - s.append('])') + s.append(']') + s.append(suffix) return s.build() def create_iter(self, shape=None, backward_broadcast=False, require_index=False): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -61,10 +61,22 @@ def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] - # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), - arr.get_backstrides(), - arr.shape, arr, orig_arr, subdtype) + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, 
+ subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides + final_dtype = subdtype + print self.name,'strides',arr.get_strides(),strides + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) class Chunks(BaseChunk): def __init__(self, l): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3096,7 +3096,9 @@ ] h = np.array(buf, dtype=descr) assert len(h) == 2 - skip('broken') # XXX + assert h['x'].shape == (2, 2) + assert h['y'].strides == (41, 16, 8) + assert h['z'].shape == (2,) for v in (h, h[0], h['x']): repr(v) # check for crash in repr assert (h['x'] == np.array([buf[0][0], @@ -3127,6 +3129,22 @@ assert len(list(a[0])) == 2 + def test_3d_record(self): + from numpypy import dtype, array + dt = dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + s = str(a) + i = a.item() + assert isinstance(i, tuple) + assert len(i) == 4 + skip('incorrect formatting via dump_data') + assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " + "[[7, 8, 9], [10, 11, 12]]])]") + + def test_issue_1589(self): import numpypy as numpy c = numpy.array([[(1, 2, 'a'), (3, 4, 'b')], [(5, 6, 'c'), (7, 8, 'd')]], diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1789,6 +1789,40 @@ dtype.subdtype) return W_NDimArray(implementation) + def read(self, arr, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return 
interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + arr = self.readarray(box.arr, box.ofs, 0, box.dtype) + return arr.dump_data(prefix='', suffix='') + + def to_builtin_type(self, space, item): + ''' From the documentation of ndarray.item(): + "Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned." + ''' + assert isinstance(item, interp_boxes.W_VoidBox) + dt = item.arr.dtype + ret_unwrapped = [] + for name in dt.fieldnames: + ofs, dtype = dt.fields[name] + if isinstance(dtype.itemtype, VoidType): + read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) + else: + read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) + if isinstance (read_val, interp_boxes.W_StringBox): + # StringType returns a str + read_val = space.wrap(dtype.itemtype.to_str(read_val)) + ret_unwrapped = ret_unwrapped + [read_val,] + if len(ret_unwrapped) == 0: + raise OperationError(space.w_NotImplementedError, space.wrap( + "item() for Void aray with no fields not implemented")) + return space.newtuple(ret_unwrapped) + class RecordType(FlexibleType): T = lltype.Char @@ -1848,7 +1882,8 @@ first = False else: pieces.append(", ") - pieces.append(tp.str_format(tp.read(box.arr, box.ofs, ofs))) + val = tp.read(box.arr, box.ofs, ofs, subdtype) + pieces.append(tp.str_format(val)) pieces.append(")") return "".join(pieces) From noreply at buildbot.pypy.org Sat Nov 23 23:00:22 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 23 Nov 2013 23:00:22 +0100 (CET) Subject: [pypy-commit] pypy default: fix zjit failure Message-ID: <20131123220022.1683F1C0206@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68296:ddc151cf2838 Date: 2013-11-23 23:57 +0200 http://bitbucket.org/pypy/pypy/changeset/ddc151cf2838/ Log: fix zjit failure diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- 
a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -56,6 +56,8 @@ self.aliases = aliases self.float_type = float_type self.fields = fields + if fieldnames is None: + fieldnames = [] self.fieldnames = fieldnames self.shape = list(shape) self.subdtype = subdtype @@ -214,15 +216,15 @@ self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): - if self.fieldnames is None: + if len(self.fieldnames) == 0: return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) def set_names(self, space, w_names): + self.fieldnames = [] if w_names == space.w_None: - self.fieldnames = None + return else: - self.fieldnames = [] iter = space.iter(w_names) while True: try: From noreply at buildbot.pypy.org Sun Nov 24 09:25:18 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 09:25:18 +0100 (CET) Subject: [pypy-commit] pypy default: fix for issue 1635, building extension module with MSVC Message-ID: <20131124082518.9A9EA1C021C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68297:ea530af6eb9c Date: 2013-11-24 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/ea530af6eb9c/ Log: fix for issue 1635, building extension module with MSVC diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -25,6 +25,20 @@ #define Py_UNICODE_SIZE 2 #endif +#if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) 
*/ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif From noreply at buildbot.pypy.org Sun Nov 24 11:23:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 11:23:34 +0100 (CET) Subject: [pypy-commit] pypy default: Add a passing test Message-ID: <20131124102334.F42341C0095@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68298:838ae931bcbd Date: 2013-11-24 11:22 +0100 http://bitbucket.org/pypy/pypy/changeset/838ae931bcbd/ Log: Add a passing test diff --git a/rpython/translator/c/test/test_exception.py b/rpython/translator/c/test/test_exception.py --- a/rpython/translator/c/test/test_exception.py +++ b/rpython/translator/c/test/test_exception.py @@ -156,3 +156,20 @@ assert res == 42 res = f1(0) assert res == 100 + +def test_dict_keyerror_inside_try_finally(): + class CtxMgr: + def __enter__(self): + return 42 + def __exit__(self, *args): + pass + def fn(x): + d = {5: x} + with CtxMgr() as forty_two: + try: + return d[x] + except KeyError: + return forty_two + f1 = getcompiledopt(fn, [int]) + res = f1(100) + assert res == 42 From noreply at buildbot.pypy.org Sun Nov 24 11:49:38 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Sun, 24 Nov 2013 11:49:38 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: allow disable stripping/tk via env var Message-ID: <20131124104938.0F2AA1C021C@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: release-2.2.x Changeset: r68299:d223aed751df Date: 2013-11-18 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/d223aed751df/ Log: allow disable stripping/tk via env var (transplanted from 679ca2fdefb681e38d4a4183979e44da642fe61d) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -232,5 +232,11 @@ else: print_usage() + 
if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) From noreply at buildbot.pypy.org Sun Nov 24 11:49:39 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Sun, 24 Nov 2013 11:49:39 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: allow disable strip/tk via env vars Message-ID: <20131124104939.4646F1C021C@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: release-2.2.x Changeset: r68300:bf1821ecc215 Date: 2013-11-18 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/bf1821ecc215/ Log: allow disable strip/tk via env vars (transplanted from 76bc9f2cd21509db16b266a543fae7e708ff1e27) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -238,5 +238,11 @@ if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): kw['withouttk'] = True + if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) From noreply at buildbot.pypy.org Sun Nov 24 11:49:40 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Sun, 24 Nov 2013 11:49:40 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: FreeBSD 9.2 / Tcl/Tk 8.6 paths include and lib paths Message-ID: <20131124104940.7E9F21C021C@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: release-2.2.x Changeset: r68301:fd729a097f6f Date: 2013-11-18 14:18 +0100 http://bitbucket.org/pypy/pypy/changeset/fd729a097f6f/ Log: FreeBSD 9.2 / Tcl/Tk 8.6 paths include and lib paths (transplanted from 68fe795f0c671d81dee4da49bb3bd193225417f9) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,10 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', 
'/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] elif sys.platform == 'win32': incdirs = [] linklibs = ['tcl85', 'tk85'] From noreply at buildbot.pypy.org Sun Nov 24 11:49:41 2013 From: noreply at buildbot.pypy.org (oberstet) Date: Sun, 24 Nov 2013 11:49:41 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: C header include paths .. once more Message-ID: <20131124104941.B81E61C021C@cobra.cs.uni-duesseldorf.de> Author: Tobias Oberstein Branch: release-2.2.x Changeset: r68302:cd21588f10bf Date: 2013-11-18 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/cd21588f10bf/ Log: C header include paths .. once more (transplanted from b6c763cd5357318c6c3f914e340f92529db4bf33) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -113,7 +113,7 @@ linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] elif sys.platform.startswith("freebsd"): - incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11'] + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] linklibs = ['tk86', 'tcl86'] libdirs = ['/usr/local/lib'] elif sys.platform == 'win32': From noreply at buildbot.pypy.org Sun Nov 24 11:49:43 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Nov 2013 11:49:43 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Fix for SSLSocket.makefile() Message-ID: <20131124104943.0DF5F1C021C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: release-2.2.x Changeset: r68303:10eb72c572ed Date: 2013-11-21 10:09 -0800 http://bitbucket.org/pypy/pypy/changeset/10eb72c572ed/ Log: Fix for SSLSocket.makefile() Test 
program: pypy -c "import socket, ssl; s = ssl.wrap_socket(socket.create_connection(('pypi.python.org', 443))); s.makefile().close(); print s.fileno()" (transplanted from 963c6d6d7d6c8cb32d1e338ef620e9edd9e479fa) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: From noreply at buildbot.pypy.org Sun Nov 24 11:49:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 11:49:44 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Fix for issue #1646 Message-ID: <20131124104944.4127B1C021C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r68304:54f5eab57b27 Date: 2013-11-23 16:21 +0100 http://bitbucket.org/pypy/pypy/changeset/54f5eab57b27/ Log: Fix for issue #1646 (transplanted from eddf9beed68ac6f1dd5569d719c1b8cffe90e7a1) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a 
non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools From noreply at buildbot.pypy.org Sun Nov 24 11:49:45 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 11:49:45 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: fix for issue 1635, building extension module with MSVC Message-ID: <20131124104945.5E1FE1C021C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.2.x Changeset: r68305:dd150c0b090f Date: 2013-11-24 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/dd150c0b090f/ Log: fix for issue 1635, building extension module with MSVC (transplanted from ea530af6eb9c23550c80cb6ed17ae9de58f9cc57) diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -25,6 +25,20 @@ #define Py_UNICODE_SIZE 2 #endif +#if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) 
*/ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif From noreply at buildbot.pypy.org Sun Nov 24 11:49:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 11:49:46 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: merge heads Message-ID: <20131124104946.91F681C021C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r68306:ae406fcaf180 Date: 2013-11-24 11:49 +0100 http://bitbucket.org/pypy/pypy/changeset/ae406fcaf180/ Log: merge heads diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. 
'.pyc' From noreply at buildbot.pypy.org Sun Nov 24 14:14:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 14:14:40 +0100 (CET) Subject: [pypy-commit] cffi default: Update to version 0.8.1 Message-ID: <20131124131440.F2E591C1176@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1425:fc756c06dd34 Date: 2013-11-24 12:07 +0100 http://bitbucket.org/cffi/cffi/changeset/fc756c06dd34/ Log: Update to version 0.8.1 diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8' +release = '0.8.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,7 +88,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.1.tar.gz - Or grab the most current version by following the instructions below. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -107,7 +107,7 @@ `Mailing list `_ """, - version='0.8', + version='0.8.1', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -9,6 +9,7 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change + '0.8.1': '0.8', # did not change } def test_version(): From noreply at buildbot.pypy.org Sun Nov 24 14:14:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 14:14:42 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: Merge for release 0.8.1 Message-ID: <20131124131442.3E2CE1C144A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1426:ec9d113aaf26 Date: 2013-11-24 14:13 +0100 http://bitbucket.org/cffi/cffi/changeset/ec9d113aaf26/ Log: Merge for release 0.8.1 diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ recursive-include cffi *.py -recursive-include c *.c *.h *.asm *.py +recursive-include c *.c *.h *.asm *.py win64.obj recursive-include testing *.py recursive-include doc *.py *.rst Makefile *.bat recursive-include demo py.cleanup *.py diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1097,7 +1097,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1105,7 +1107,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this 
test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1115,7 +1119,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8' +release = '0.8.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.1.tar.gz - Or grab the most current version by following the instructions below. - - MD5: e61deb0515311bb42d5d58b9403bc923 + - MD5: ... - - SHA: 8332429193cb74d74f3347af180b448425d7d176 + - SHA: ... 
* Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -48,9 +48,14 @@ try: compiler.compile(['c/check__thread.c']) except distutils.errors.CompileError: - print >> sys.stderr, "will not use '__thread' in the C code" + sys.stderr.write("the above error message can be safely ignored;\n") + sys.stderr.write("will not use '__thread' in the C code\n") else: define_macros.append(('USE__THREAD', None)) + try: + os.unlink('c/check__thread.o') + except OSError: + pass def use_pkg_config(): _ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True) @@ -102,7 +107,7 @@ `Mailing list `_ """, - version='0.8', + version='0.8.1', packages=['cffi'], zip_safe=False, diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1637,7 +1637,11 @@ #include #define alloca _alloca #else - #include + # ifdef __FreeBSD__ + # include + # else + # include + # endif #endif static int (*python_callback)(int how_many, int *values); static int c_callback(int how_many, ...) 
{ diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -9,6 +9,7 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change + '0.8.1': '0.8', # did not change } def test_version(): From noreply at buildbot.pypy.org Sun Nov 24 14:14:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 14:14:43 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: MD5/SHA1 Message-ID: <20131124131443.482561C1473@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1427:d531f7dc59bc Date: 2013-11-24 14:14 +0100 http://bitbucket.org/cffi/cffi/changeset/d531f7dc59bc/ Log: MD5/SHA1 diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -92,9 +92,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 1a877bf113bfe90fdefedbf9e39310d2 - - SHA: ... + - SHA: d46b7cf92956fa01d9f8e0a8d3c7e2005ae40893 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Sun Nov 24 14:20:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 14:20:15 +0100 (CET) Subject: [pypy-commit] cffi default: No real clue, trying to improve the display on readthedocs.org Message-ID: <20131124132015.7ACDC1C1176@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1428:491349751193 Date: 2013-11-24 14:20 +0100 http://bitbucket.org/cffi/cffi/changeset/491349751193/ Log: No real clue, trying to improve the display on readthedocs.org diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,9 +1,6 @@ CFFI documentation ================================ -.. toctree:: - :maxdepth: 2 - Foreign Function Interface for Python calling C code. 
The aim of this project is to provide a convenient and reliable way of calling C code from Python. The interface is based on `LuaJIT's FFI`_ and follows a few principles: From noreply at buildbot.pypy.org Sun Nov 24 14:22:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 14:22:00 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: hg merge default Message-ID: <20131124132200.359F51C1176@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1429:4ed0f953d3c5 Date: 2013-11-24 14:21 +0100 http://bitbucket.org/cffi/cffi/changeset/4ed0f953d3c5/ Log: hg merge default diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,9 +1,6 @@ CFFI documentation ================================ -.. toctree:: - :maxdepth: 2 - Foreign Function Interface for Python calling C code. The aim of this project is to provide a convenient and reliable way of calling C code from Python. The interface is based on `LuaJIT's FFI`_ and follows a few principles: From noreply at buildbot.pypy.org Sun Nov 24 15:22:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 15:22:04 +0100 (CET) Subject: [pypy-commit] pypy default: A workaround against passing huge numbers or huge strings on 64-bit Message-ID: <20131124142204.B0DE21C3261@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68308:07f0c7c1a7ca Date: 2013-11-24 15:21 +0100 http://bitbucket.org/pypy/pypy/changeset/07f0c7c1a7ca/ Log: A workaround against passing huge numbers or huge strings on 64-bit machines diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py --- a/rpython/rlib/rdtoa.py +++ b/rpython/rlib/rdtoa.py @@ -38,6 +38,10 @@ ], ) +# dtoa.c is limited to 'int', so we refuse to pass it +# strings or integer arguments bigger than ~2GB +_INT_LIMIT = 0x7ffff000 + dg_strtod = rffi.llexternal( '_PyPy_dg_strtod', [rffi.CCHARP, rffi.CCHARPP], rffi.DOUBLE, compilation_info=eci, sandboxsafe=True) 
@@ -52,6 +56,8 @@ compilation_info=eci, sandboxsafe=True) def strtod(input): + if len(input) > _INT_LIMIT: + raise MemoryError end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') try: ll_input = rffi.str2charp(input) @@ -232,6 +238,8 @@ def dtoa(value, code='r', mode=0, precision=0, flags=0, special_strings=lower_special_strings, upper=False): + if precision > _INT_LIMIT: + raise MemoryError decpt_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: sign_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') From noreply at buildbot.pypy.org Sun Nov 24 15:22:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 15:22:03 +0100 (CET) Subject: [pypy-commit] pypy default: The "Signed *" declaration is not in sync with rdtoa.py Message-ID: <20131124142203.7DF621C325B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68307:8fdba9d51786 Date: 2013-11-24 15:21 +0100 http://bitbucket.org/pypy/pypy/changeset/8fdba9d51786/ Log: The "Signed *" declaration is not in sync with rdtoa.py diff --git a/rpython/translator/c/src/dtoa.c b/rpython/translator/c/src/dtoa.c --- a/rpython/translator/c/src/dtoa.c +++ b/rpython/translator/c/src/dtoa.c @@ -2329,7 +2329,7 @@ static char * __Py_dg_dtoa(double dd, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve) + int *decpt, int *sign, char **rve) { /* Arguments ndigits, decpt, sign are similar to those of ecvt and fcvt; trailing zeros are suppressed from @@ -2952,7 +2952,7 @@ } char * _PyPy_dg_dtoa(double dd, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve) + int *decpt, int *sign, char **rve) { char* result; _PyPy_SET_53BIT_PRECISION_HEADER; diff --git a/rpython/translator/c/src/dtoa.h b/rpython/translator/c/src/dtoa.h --- a/rpython/translator/c/src/dtoa.h +++ b/rpython/translator/c/src/dtoa.h @@ -2,6 +2,6 @@ double _PyPy_dg_strtod(const char *str, char **ptr); char * _PyPy_dg_dtoa(double d, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve); + int 
*decpt, int *sign, char **rve); void _PyPy_dg_freedtoa(char *s); From noreply at buildbot.pypy.org Sun Nov 24 16:45:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Nov 2013 16:45:25 +0100 (CET) Subject: [pypy-commit] pypy default: Catch MemoryErrors raised by the machinery of interactive command-line (like compiling the input), and Message-ID: <20131124154525.D8D9E1C1176@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68309:6d65219fce4b Date: 2013-11-24 16:44 +0100 http://bitbucket.org/pypy/pypy/changeset/6d65219fce4b/ Log: Catch MemoryErrors raised by the machinery of interactive command- line (like compiling the input), and display it like we do KeyboardInterrupt. diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() From noreply at buildbot.pypy.org Sun Nov 24 19:24:49 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 19:24:49 +0100 (CET) Subject: [pypy-commit] pypy default: fix translation, be more like cpython Message-ID: <20131124182449.317571C1473@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68310:d45c21ada48f Date: 2013-11-24 20:22 +0200 http://bitbucket.org/pypy/pypy/changeset/d45c21ada48f/ Log: fix translation, be more like cpython diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -25,16 +25,18 @@ #define Py_UNICODE_SIZE 2 #endif -#if defined(_MSC_VER) - /* So MSVC users need not specify the .lib file in - * their Makefile (other compilers are generally - * taken care of by distutils.) 
*/ -# ifdef _DEBUG -# error("debug first with cpython") -# pragma comment(lib,"python27.lib") -# else -# pragma comment(lib,"python27.lib") -# endif /* _DEBUG */ +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) */ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif #endif /* _MSC_VER */ From noreply at buildbot.pypy.org Sun Nov 24 19:28:01 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 19:28:01 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: fix translation, be more like cpython Message-ID: <20131124182801.8C2BF1C1473@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.2.x Changeset: r68311:11ffafdff4d6 Date: 2013-11-24 20:22 +0200 http://bitbucket.org/pypy/pypy/changeset/11ffafdff4d6/ Log: fix translation, be more like cpython (grafted from d45c21ada48feb930b2afff45ce9f032df5acc98) diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -25,16 +25,18 @@ #define Py_UNICODE_SIZE 2 #endif -#if defined(_MSC_VER) - /* So MSVC users need not specify the .lib file in - * their Makefile (other compilers are generally - * taken care of by distutils.) */ -# ifdef _DEBUG -# error("debug first with cpython") -# pragma comment(lib,"python27.lib") -# else -# pragma comment(lib,"python27.lib") -# endif /* _DEBUG */ +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) 
*/ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif #endif /* _MSC_VER */ From noreply at buildbot.pypy.org Sun Nov 24 19:34:14 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 24 Nov 2013 19:34:14 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: avoid a fail attempt to copy nothing to a nullptr buffer Message-ID: <20131124183414.8DD801C1473@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: release-2.2.x Changeset: r68312:87aa9de10f9c Date: 2013-11-15 11:44 -0800 http://bitbucket.org/pypy/pypy/changeset/87aa9de10f9c/ Log: avoid a fail attempt to copy nothing to a nullptr buffer (grafted from 8d0da723fb85a16a3c850f2d7ed6d98b967cb6e2) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -236,6 +236,8 @@ raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) oldlen = self.len new = len(s) / self.itemsize + if not new: + return self.setlen(oldlen + new) cbuf = self._charbuf_start() copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize), 0, len(s)) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -171,6 +171,9 @@ a = self.array('c') a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' and len(a) == 3 + a = self.array('c') + a.fromstring('') + assert not len(a) for t in 'bBhHiIlLfd': a = self.array(t) From noreply at buildbot.pypy.org Sun Nov 24 21:26:23 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Sun, 24 Nov 2013 21:26:23 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Revert the sorted list experiment. 
Message-ID: <20131124202623.16BFE1C0095@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r68313:b80e7392cb94 Date: 2013-11-24 20:17 +0000 http://bitbucket.org/pypy/pypy/changeset/b80e7392cb94/ Log: Revert the sorted list experiment. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -896,7 +896,7 @@ def switch_to_correct_strategy(self, w_list, w_item): if type(w_item) is W_IntObject: - strategy = self.space.fromcache(IntegerListAscendingStrategy) + strategy = self.space.fromcache(IntegerListStrategy) elif type(w_item) is W_StringObject: strategy = self.space.fromcache(StringListStrategy) elif type(w_item) is W_UnicodeObject: @@ -1010,11 +1010,7 @@ def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) - start, step, length = self.unerase(w_list.lstorage) - if step > 0: - strategy = w_list.strategy = self.space.fromcache(IntegerListAscendingStrategy) - else: - strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) + strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) w_list.lstorage = strategy.erase(items) def wrap(self, intval): @@ -1522,25 +1518,6 @@ def unwrap(self, w_int): return self.space.int_w(w_int) - def init_from_list_w(self, w_list, list_w): - # While unpacking integer elements, also determine whether they're - # pre-sorted. - assert len(list_w) > 0 - asc = True - l = [0] * len(list_w) - lst = l[0] = self.unwrap(list_w[0]) - for i in range(1, len(list_w)): - item_w = list_w[i] - it = self.unwrap(item_w) - if asc and it < lst: - asc = False - l[i] = it - lst = it - w_list.lstorage = self.erase(l) - if asc: - # The list was already sorted into ascending order. 
- w_list.strategy = self.space.fromcache(IntegerListAscendingStrategy) - erase, unerase = rerased.new_erasing_pair("integer") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1549,8 +1526,7 @@ return type(w_obj) is W_IntObject def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(IntegerListStrategy) \ - or w_list.strategy is self.space.fromcache(IntegerListAscendingStrategy) + return w_list.strategy is self.space.fromcache(IntegerListStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) @@ -1558,8 +1534,6 @@ sorter.sort() if reverse: l.reverse() - else: - w_list.strategy = self.space.fromcache(IntegerListAscendingStrategy) def getitems_int(self, w_list): return self.unerase(w_list.lstorage) @@ -1637,94 +1611,6 @@ self.space, storage, self) return self._base_setslice(w_list, start, step, slicelength, w_other) -class IntegerListAscendingStrategy(IntegerListStrategy): - def sort(self, w_list, reverse): - if reverse: - self.unerase(w_list.lstorage).reverse() - w_list.strategy = self.space.fromcache(IntegerListStrategy) - - def append(self, w_list, w_item): - if type(w_item) is W_IntObject: - l = self.unerase(w_list.lstorage) - length = len(l) - item = self.unwrap(w_item) - if length == 0 or l[length - 1] <= item: - l.append(item) - return - w_list.strategy = self.space.fromcache(IntegerListStrategy) - IntegerListStrategy.append(self, w_list, w_item) - - def insert(self, w_list, index, w_item): - if type(w_item) is W_IntObject: - l = self.unerase(w_list.lstorage) - length = len(l) - item = self.unwrap(w_item) - if length == 0 or \ - ((index == 0 or l[index - 1] <= item) and (index == length or l[index] >= item)): - l.insert(index, item) - return - w_list.strategy = self.space.fromcache(IntegerListStrategy) - IntegerListStrategy.insert(self, w_list, index, w_item) - - def _extend_from_list(self, w_list, w_item): - if type(w_item) is W_ListObject and \ - w_item.strategy is 
self.space.fromcache(IntegerListAscendingStrategy): - self_l = self.unerase(w_list.lstorage) - other_l = self.unerase(w_item.lstorage) - if len(self_l) == 0 or len(other_l) == 0 or self_l[len(self_l) - 1] <= other_l[0]: - self_l.extend(other_l) - return - w_list.strategy = self.space.fromcache(IntegerListStrategy) - IntegerListStrategy._extend_from_list(self,w_list, w_item) - - def setitem(self, w_list, index, w_item): - if type(w_item) is W_IntObject: - item = self.unwrap(w_item) - l = self.unerase(w_list.lstorage) - length = len(l) - assert len(l) > 0 - if (index == 0 or l[index - 1] <= item) \ - and (index == length - 1 or l[index + 1] >= item): - l[index] = item - return - w_list.strategy = self.space.fromcache(IntegerListStrategy) - IntegerListStrategy.setitem(self, w_list, index, w_item) - - def setslice(self, w_list, start, step, slicelength, w_other): - # XXX could be supported if really desired - w_list.strategy = self.space.fromcache(IntegerListStrategy) - IntegerListStrategy.setslice(self, w_list, start, step, slicelength, w_other) - - def inplace_mul(self, w_list, times): - l = self.unerase(w_list.lstorage) - length = len(l) - if length == 0: - return - if l[0] != l[length - 1]: - w_list.strategy = self.space.fromcache(IntegerListStrategy) - IntegerListStrategy.inplace_mul(self, w_list, times) - - def reverse(self, w_list): - self.unerase(w_list.lstorage).reverse() - w_list.strategy = self.space.fromcache(IntegerListStrategy) - - def _safe_find(self, w_list, obj, start, stop): - if w_list.length() < 16: - return IntegerListStrategy._safe_find(self, w_list, obj, start, stop) - l = self.unerase(w_list.lstorage) - start -= 1 - stop += 1 - while stop - start > 1: - p = (start + stop) / 2 - if l[p] < obj: - start = p - else: - stop = p - if stop == len(l) or l[stop] != obj: - raise ValueError - return stop - - class FloatListStrategy(ListStrategy): import_from_mixin(AbstractUnwrappedStrategy) @@ -1754,7 +1640,7 @@ elif w_objt is W_IntObject or w_objt is 
W_LongObject: return self._safe_find(w_list, w_obj.float_w(self.space), start, stop) elif w_objt is W_StringObject or w_objt is W_UnicodeObject \ - or self.space.type(w_obj).compares_by_identity(): + or self.space.type(w_obj).compares_by_identity(): raise ValueError return ListStrategy.find(self, w_list, w_obj, start, stop) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -526,12 +526,6 @@ assert not l.__contains__(-20) assert not l.__contains__(-21) - l = list(range(1000)) - assert l.index(123) == 123 - del l[123] - raises(ValueError, "l.index(123)") - assert l.index(124) == 123 - def test_call_list(self): assert list('') == [] assert list('abc') == ['a', 'b', 'c'] @@ -583,10 +577,6 @@ assert m == [5,2,3] assert l == [1,2,3] - l = [1,2,3] - l.extend([3,4]) - assert l == [1, 2, 3, 3, 4] - def test_extend_tuple(self): l = l0 = [1] l.extend((2,)) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,8 +1,5 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, \ - ObjectListStrategy, IntegerListStrategy, IntegerListAscendingStrategy, \ - FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, \ - UnicodeListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -345,6 +342,7 @@ r = make_range_list(space, 1,3,7) empty.extend(r) assert isinstance(empty.strategy, RangeListStrategy) + print empty.getitem(6) assert 
space.is_true(space.eq(empty.getitem(1), w(4))) empty = W_ListObject(space, []) @@ -482,8 +480,7 @@ l1 = make_range_list(self.space, 0, 1, 100) l2 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) l3 = self.space.add(l2, l1) - assert isinstance(l2.strategy, IntegerListAscendingStrategy) - assert isinstance(l3.strategy, IntegerListStrategy) + assert l3.strategy is l2.strategy def test_mul(self): l1 = W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3)]) @@ -662,80 +659,6 @@ list_copy[0] = 42 assert list_orig == [1, 2, 3] - def test_integerascending(self): - space = self.space - w_l = W_ListObject(space, [space.wrap(1), space.wrap(3)]) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.append(space.wrap(5)) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - - w_l.insert(0, space.wrap(0)) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.insert(4, space.wrap(6)) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - assert space.listview_int(w_l) == [0, 1, 3, 5 ,6] - - w_l = W_ListObject(space, []) - w_l.insert(0, space.wrap(1)) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - - w_l = W_ListObject(space, [space.wrap(3), space.wrap(2), space.wrap(4), space.wrap(1)]) - assert isinstance(w_l.strategy, IntegerListStrategy) - l2 = [1, 2, 3, 4] - space.call_method(w_l, "sort") - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - assert space.listview_int(w_l) == l2 - space.call_method(w_l, "sort") - assert space.listview_int(w_l) == l2 - w_l.append(space.wrap(5)) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.append(space.wrap(0)) - assert isinstance(w_l.strategy, IntegerListStrategy) - - w_l = W_ListObject(space, []) - space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(1), space.wrap(2)])) - assert space.listview_int(w_l) == [1, 2] - assert isinstance(w_l.strategy, 
IntegerListAscendingStrategy) - - space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(4)])) - assert space.listview_int(w_l) == [1, 2, 4] - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - - space.call_method(w_l, "pop") - space.call_method(w_l, "pop") - space.call_method(w_l, "pop") - assert space.listview_int(w_l) == [] - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(4)])) - assert space.listview_int(w_l) == [4] - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - - space.call_method(w_l, "extend", W_ListObject(space, [space.wrap(0)])) - assert space.listview_int(w_l) == [4, 0] - assert isinstance(w_l.strategy, IntegerListStrategy) - - w_l = W_ListObject(space, [space.wrap(1), space.wrap(3), space.wrap(5)]) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.setitem(0, space.wrap(0)) - w_l.setitem(1, space.wrap(4)) - w_l.setitem(2, space.wrap(6)) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.setitem(1, space.wrap(7)) - assert isinstance(w_l.strategy, IntegerListStrategy) - - w_l = W_ListObject(space, [space.wrap(1), space.wrap(1)]) - w_l.inplace_mul(2) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.append(space.wrap(2)) - w_l.inplace_mul(2) - assert isinstance(w_l.strategy, IntegerListStrategy) - - w_l = W_ListObject(space, [space.wrap(1), space.wrap(2)]) - assert isinstance(w_l.strategy, IntegerListAscendingStrategy) - w_l.sort(True) - assert isinstance(w_l.strategy, IntegerListStrategy) - assert space.listview_int(w_l) == [2, 1] - class TestW_ListStrategiesDisabled: spaceconfig = {"objspace.std.withliststrategies": False} From noreply at buildbot.pypy.org Sun Nov 24 23:07:08 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 24 Nov 2013 23:07:08 +0100 (CET) Subject: [pypy-commit] pypy default: skip unsupported test Message-ID: 
<20131124220708.3D4D61C3609@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68314:a93f0c77683d Date: 2013-11-24 23:06 +0100 http://bitbucket.org/pypy/pypy/changeset/a93f0c77683d/ Log: skip unsupported test diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -282,6 +282,7 @@ test_int_valueerror = unsupported test_float = unsupported test_hlstr = unsupported + test_strip_multiple_chars = unsupported def test_hash_via_type(self): from rpython.rlib.objectmodel import compute_hash From noreply at buildbot.pypy.org Sun Nov 24 23:09:50 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 23:09:50 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: use pytest, get nice failure logs Message-ID: <20131124220950.A21011C3609@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r899:3be765a8c73d Date: 2013-11-25 00:06 +0200 http://bitbucket.org/pypy/buildbot/changeset/3be765a8c73d/ Log: use pytest, get nice failure logs diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -836,6 +836,13 @@ haltOnFailure=True, )) + self.addStep(ShellCmd( + description="install pytest", + command=['install/bin/pip', 'install','pytest'], + workdir='./', + haltOnFailure=True, + )) + # obtain a pypy-compatible branch of numpy numpy_url = 'https://www.bitbucket.org/pypy/numpy' numpy_pypy_branch = 'pypy-compat' @@ -847,14 +854,16 @@ command=['../install/bin/python', 'setup.py','install'], workdir='numpy_src')) - self.addStep(ShellCmd( + #test_cmd = 'bin/nosetests' + test_cmd = 'bin/py.test' + self.addStep(PytestCmd( description="test numpy", - command=['bin/nosetests', 'site-packages/numpy', + command=[test_cmd, 'site-packages/numpy', + "--resultlog=testrun.log", ], - #logfiles={'pytestLog': 'pytest-numpy.log'}, 
- timeout=4000, + logfiles={'pytestLog': 'testrun.log'}, + timeout=1000, workdir='install', - #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? )) if host == 'tannit': pypy_c_rel = 'install/bin/python' From noreply at buildbot.pypy.org Sun Nov 24 23:09:51 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 23:09:51 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: remove all schedulers if debugging Message-ID: <20131124220951.ADD6F1C3609@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r900:2f122536ecdf Date: 2013-11-25 00:08 +0200 http://bitbucket.org/pypy/buildbot/changeset/2f122536ecdf/ Log: remove all schedulers if debugging diff --git a/master/master.cfg b/master/master.cfg --- a/master/master.cfg +++ b/master/master.cfg @@ -24,4 +24,9 @@ if we_are_debugging(): for builderdict in BuildmasterConfig['builders']: builderdict["slavenames"] = ['localhost'] + for s in BuildmasterConfig['schedulers']: + #This will incidentally test that all builders are + # CustomForceSchedulers + if isinstance(s, Nightly): + BuildmasterConfig['schedulers'].remove(s) BuildmasterConfig['buildbotURL'] = "http://localhost:%d/" % (httpPortNumber) From noreply at buildbot.pypy.org Sun Nov 24 23:09:52 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 24 Nov 2013 23:09:52 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: merge default into branch Message-ID: <20131124220952.D37971C3609@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r901:9cc68795c263 Date: 2013-11-25 00:09 +0200 http://bitbucket.org/pypy/buildbot/changeset/9cc68795c263/ Log: merge default into branch diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -136,6 +136,7 @@ BUILDJITLINUXARM, BUILDLINUXARMHF_RASPBIAN, BUILDJITLINUXARMHF_RASPBIAN, + BUILDJITLINUXARMHF_RARING, ] 
schedulers = [ diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -866,6 +866,11 @@ workdir='install', )) if host == 'tannit': + self.addStep(ShellCmd( + description="install jinja2", + command=['install/bin/pip', 'install', 'jinja2'], + workdir='./', + haltOnFailure=True,)) pypy_c_rel = 'install/bin/python' self.addStep(ShellCmd( description="measure numpy compatibility", diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -214,18 +214,21 @@ # other platforms #MACOSX32, # on minime JITWIN32, # on aurora - JITFREEBSD764, # on headless - JITFREEBSD864, # on ananke - JITFREEBSD964, # on exarkun's freebsd + #JITFREEBSD764, # on headless + #JITFREEBSD864, # on ananke + JITFREEBSD964, # on tavendo JITMACOSX64, # on xerxes # buildbot selftest PYPYBUILDBOT # on cobra ], branch='default', hour=0, minute=0), Nightly("nightly-2-00", [ + NUMPY_64, # on tannit64, uses 1 core, takes about 15min. + # XXX maybe use a trigger instead? 
JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - ], branch='default', hour=2, minute=0), + + ], branch=None, hour=2, minute=0), Nightly("nightly-2-00-py3k", [ LINUX64, # on allegro64, uses all cores @@ -235,6 +238,7 @@ Nightly("nighly-ppc", [ JITONLYLINUXPPC64, # on gcc1 ], branch='ppc-jit-backend', hour=1, minute=0), + CustomForceScheduler('Force Scheduler', builderNames=[ PYPYBUILDBOT, @@ -411,7 +415,7 @@ "category": 'freebsd64' }, {"name" : JITFREEBSD964, - "slavenames": ['hybridlogic'], + "slavenames": ['hybridlogic', 'tavendo-freebsd-9.2-amd64'], 'builddir' : JITFREEBSD964, 'factory' : pypyJITTranslatedTestFactoryFreeBSD, "category": 'freebsd64' diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -5,7 +5,8 @@ import cgi import urllib import sys -from twisted.web.static import File, DirectoryLister +from twisted.web.static import File, formatFileSize +from buildbot.status.web.base import DirectoryLister class PyPyTarball(object): @@ -142,98 +143,39 @@ names = File.listNames(self) if is_pypy_dir(names): names = self.sortBuildNames(names) - Listener = PyPyDirectoryLister else: names = self.sortDirectoryNames(File.listEntities(self)) - Listener = DirectoryLister + Listener = PyPyDirectoryLister return Listener(self.path, names, self.contentTypes, self.contentEncodings, self.defaultType) -class NumpyStatusList(File): - pass - class PyPyDirectoryLister(DirectoryLister): - template = """ - -%(header)s - - - - -

%(header)s

- - - - - - - - - - - - -%(tableContent)s - -
FilenameSizeDateown testsapplevel tests
- - - -""" - - linePattern = """ - %(text)s - %(size)s - %(date)s - %(own_summary)s - %(app_summary)s - -""" + '''template based, uses master/templates/directory.html + ''' def render(self, request): self.status = request.site.buildbot_service.getStatus() return DirectoryLister.render(self, request) - def _buildTableContent(self, elements): - tableContent = [] + def _getFilesAndDirectories(self, directory): + dirs, files = DirectoryLister._getFilesAndDirectories(self, directory) rowClasses = itertools.cycle(['odd', 'even']) - for element, rowClass in zip(elements, rowClasses): - element["class"] = rowClass - self._add_test_results(element, rowClass) - tableContent.append(self.linePattern % element) - return tableContent + for f, rowClass in zip(files, rowClasses): + f["class"] = rowClass + self._add_test_results(f, rowClass) + for d in dirs: + dirname = urllib.unquote(d['href']) + dd = py.path.local(self.path).join(dirname) + date = datetime.date.fromtimestamp(dd.mtime()) + d['date'] = date.isoformat() + # Assume dir is non-recursive + size = sum([f.size() for f in dd.listdir() if f.isfile()]) + d['size'] = formatFileSize(size) + + return dirs, files def _add_test_results(self, element, rowClass): filename = urllib.unquote(element['href']) @@ -292,3 +234,6 @@ else: return rowClass + '-failed' +class NumpyStatusList(PyPyList): + pass + diff --git a/master/templates/directory.html b/master/templates/directory.html new file mode 100644 --- /dev/null +++ b/master/templates/directory.html @@ -0,0 +1,94 @@ +{% extends "layout.html" %} +{% block morehead %} + + +{% endblock %} + +{% block content %} + +

Directory listing for {{ path }}

+ +{% set row_class = cycler('odd', 'even') %} + +{% set has_tests = files|join('', attribute='own_summary')|length > 0 or + files|join('', attribute='app_summary')|length > 0 %} + + + +{% if files|length > 0 %} + + + + +{% if has_tests %} + + +{% endif %} + +{% else %} + + + + +{% if has_tests %} + + +{% endif %} + +{% endif %} + +{% for d in directories %} + + + + +{% if has_tests %} + + +{% endif %} + +{% endfor %} + +{% for f in files %} + + + + +{% if has_tests %} + + +{% endif %} + +{% endfor %} +
FilenameSizeDateown testsapplevel tests
DirectorySizeDate
{{ d.text }}{{ d.size}}{{ d.date}}
{{ f.text }}{{ f.size }}{{ f.date }}{{ f.own_summary }}{{ f.app_summary }}
+ +{% endblock %} diff --git a/master/templates/layout.html b/master/templates/layout.html --- a/master/templates/layout.html +++ b/master/templates/layout.html @@ -23,19 +23,19 @@ {% block header -%}
Home - - - Speed - Summary (trunk) - Summary - Nightly builds + - Speed + - Numpy compatability + - Summary (trunk) + - Summary + - Nightly builds - Waterfall + - Waterfall - Builders + - Builders diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ buildbot-slave==0.8.6p1 decorator==3.4.0 mock==1.0.1 -py==1.4.9 +py==1.4.18 pytest==2.2.4 python-dateutil==1.5 sqlalchemy-migrate==0.7.2 From noreply at buildbot.pypy.org Mon Nov 25 11:44:11 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 25 Nov 2013 11:44:11 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_pass_ndarray_object_to_c by exposing W_NDimArray to C as we do for all the other builtin types Message-ID: <20131125104411.8DC551C3609@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68315:7f3a776cc72a Date: 2013-11-25 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/7f3a776cc72a/ Log: fix test_pass_ndarray_object_to_c by exposing W_NDimArray to C as we do for all the other builtin types diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', diff --git a/pypy/module/cpyext/src/ndarrayobject.c 
b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -287,7 +287,6 @@ def test_pass_ndarray_object_to_c(self): - skip('fixme') from _numpypy.multiarray import ndarray mod = self.import_extension('foo', [ ("check_array", "METH_VARARGS", From noreply at buildbot.pypy.org Mon Nov 25 11:44:40 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 25 Nov 2013 11:44:40 +0100 (CET) Subject: [pypy-commit] pypy ndarray-buffer: hg merge default Message-ID: <20131125104440.1490E1C3609@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: ndarray-buffer Changeset: r68316:eda923010bdb Date: 2013-11-25 11:44 +0100 http://bitbucket.org/pypy/pypy/changeset/eda923010bdb/ Log: hg merge default diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,6 @@ .. 
branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -25,6 +25,22 @@ #define Py_UNICODE_SIZE 2 #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) 
*/ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " 
given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -56,6 +56,8 @@ self.aliases = aliases self.float_type = float_type self.fields = fields + if fieldnames is None: + fieldnames = [] self.fieldnames = fieldnames self.shape = list(shape) self.subdtype = subdtype @@ -214,15 +216,15 @@ self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): - if self.fieldnames is None: + if len(self.fieldnames) == 0: return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) def set_names(self, space, w_names): + self.fieldnames = [] if w_names == space.w_None: - self.fieldnames = None + return else: - self.fieldnames = [] iter = space.iter(w_names) while True: try: diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -252,12 +252,13 @@ return space.wrap(self.dump_data()) return space.call_function(cache.w_array_str, self) - def dump_data(self): + def dump_data(self, prefix='array(', suffix=')'): i = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() - s.append('array([') + s.append(prefix) + s.append('[') while not i.done(): if first: first = False @@ -265,7 +266,8 @@ s.append(', ') s.append(dtype.itemtype.str_format(i.getitem())) i.next() - s.append('])') + s.append(']') + s.append(suffix) return s.build() def create_iter(self, shape=None, backward_broadcast=False, require_index=False): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ 
b/pypy/module/micronumpy/iter.py @@ -61,10 +61,22 @@ def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] - # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), - arr.get_backstrides(), - arr.shape, arr, orig_arr, subdtype) + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, + subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides + final_dtype = subdtype + print self.name,'strides',arr.get_strides(),strides + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) class Chunks(BaseChunk): def __init__(self, l): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3138,7 +3138,9 @@ ] h = np.array(buf, dtype=descr) assert len(h) == 2 - skip('broken') # XXX + assert h['x'].shape == (2, 2) + assert h['y'].strides == (41, 16, 8) + assert h['z'].shape == (2,) for v in (h, h[0], h['x']): repr(v) # check for crash in repr assert (h['x'] == np.array([buf[0][0], @@ -3169,6 +3171,22 @@ assert len(list(a[0])) == 2 + def test_3d_record(self): + from numpypy import dtype, array + dt = dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + s = str(a) + i = a.item() + assert isinstance(i, tuple) + assert len(i) == 4 + 
skip('incorrect formatting via dump_data') + assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " + "[[7, 8, 9], [10, 11, 12]]])]") + + def test_issue_1589(self): import numpypy as numpy c = numpy.array([[(1, 2, 'a'), (3, 4, 'b')], [(5, 6, 'c'), (7, 8, 'd')]], diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1789,6 +1789,40 @@ dtype.subdtype) return W_NDimArray(implementation) + def read(self, arr, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + arr = self.readarray(box.arr, box.ofs, 0, box.dtype) + return arr.dump_data(prefix='', suffix='') + + def to_builtin_type(self, space, item): + ''' From the documentation of ndarray.item(): + "Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned." 
+ ''' + assert isinstance(item, interp_boxes.W_VoidBox) + dt = item.arr.dtype + ret_unwrapped = [] + for name in dt.fieldnames: + ofs, dtype = dt.fields[name] + if isinstance(dtype.itemtype, VoidType): + read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) + else: + read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) + if isinstance (read_val, interp_boxes.W_StringBox): + # StringType returns a str + read_val = space.wrap(dtype.itemtype.to_str(read_val)) + ret_unwrapped = ret_unwrapped + [read_val,] + if len(ret_unwrapped) == 0: + raise OperationError(space.w_NotImplementedError, space.wrap( + "item() for Void aray with no fields not implemented")) + return space.newtuple(ret_unwrapped) + class RecordType(FlexibleType): T = lltype.Char @@ -1848,7 +1882,8 @@ first = False else: pieces.append(", ") - pieces.append(tp.str_format(tp.read(box.arr, box.ofs, ofs))) + val = tp.read(box.arr, box.ofs, ofs, subdtype) + pieces.append(tp.str_format(val)) pieces.append(")") return "".join(pieces) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -371,15 +371,19 @@ listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: + if tp is SomeOrderedDict.knowntype: + cls = SomeOrderedDict + else: + cls = SomeDict if need_const: key = Constant(x) try: return self.immutable_cache[key] except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) @@ -412,10 +416,7 @@ dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is 
SomeOrderedDict.knowntype: - result = SomeOrderedDict(dictdef) - else: - result = SomeDict(dictdef) + result = cls(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4148,6 +4148,19 @@ a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) + def test_prebuilt_ordered_dict(self): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please upgrade to python 2.7") + d = OrderedDict([("aa", 1)]) + + def f(): + return d + + a = self.RPythonAnnotator() + assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -460,13 +460,13 @@ check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr): + def method_strip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_lstrip(str, chr): + def method_lstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_rstrip(str, chr): + def method_rstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) def method_join(str, s_list): diff --git a/rpython/jit/backend/x86/test/test_ztranslation_basic.py b/rpython/jit/backend/x86/test/test_ztranslation_basic.py --- a/rpython/jit/backend/x86/test/test_ztranslation_basic.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_basic.py @@ -1,11 +1,11 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationX86(TranslationTest): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. 
If not, the CPUClass - # needs to be changed to CPU386_NO_SSE2, but well. - if WORD == 4: + # msse2 and sse are always on on x86-64 + if WORD == 4 and sys.platform != 'win32': assert '-msse2' in cbuilder.eci.compile_extra assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py --- a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py @@ -1,11 +1,13 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC - +from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationCallAssemblerX86(TranslationTestCallAssembler): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass + #We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. 
- assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra \ No newline at end of file + if WORD == 4 and sys.platform != 'win32': + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py b/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py --- a/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py @@ -1,11 +1,14 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestJITStats from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC +from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationJITStatsX86(TranslationTestJITStats): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass + #We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. 
- assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra \ No newline at end of file + if WORD == 4 and sys.platform != 'win32': + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py --- a/rpython/rlib/rdtoa.py +++ b/rpython/rlib/rdtoa.py @@ -38,6 +38,10 @@ ], ) +# dtoa.c is limited to 'int', so we refuse to pass it +# strings or integer arguments bigger than ~2GB +_INT_LIMIT = 0x7ffff000 + dg_strtod = rffi.llexternal( '_PyPy_dg_strtod', [rffi.CCHARP, rffi.CCHARPP], rffi.DOUBLE, compilation_info=eci, sandboxsafe=True) @@ -52,6 +56,8 @@ compilation_info=eci, sandboxsafe=True) def strtod(input): + if len(input) > _INT_LIMIT: + raise MemoryError end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') try: ll_input = rffi.str2charp(input) @@ -232,6 +238,8 @@ def dtoa(value, code='r', mode=0, precision=0, flags=0, special_strings=lower_special_strings, upper=False): + if precision > _INT_LIMIT: + raise MemoryError decpt_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: sign_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -4,7 +4,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -80,38 +79,6 @@ RTLD_NOW = cConfig.RTLD_NOW RTLD_LAZY = cConfig.RTLD_LAZY - _t_opened = {} - - def t_dlopen(name): - # for direct execution: can't use the regular way on FreeBSD :-( - # http://factor-language.blogspot.de/2009/02/note-about-libdl-functions-on-netbsd.html - import ctypes - if name: - name = rffi.charp2str(name) - else: - name = None - 
try: - res = ctypes.cdll.LoadLibrary(name) - except OSError, e: - raise DLOpenError(str(e)) - h = rffi.cast(rffi.VOIDP, res._handle) - _t_opened[rffi.cast(rffi.LONG, h)] = res - return h - - def t_dlclose(handle): - _t_opened.pop(rffi.cast(rffi.LONG, handle)) - return rffi.cast(rffi.INT, 0) - - def t_dldym(handle, name): - import ctypes - lib = _t_opened[rffi.cast(rffi.LONG, handle)] - try: - symbol = lib[name] - except AttributeError: - raise KeyError(name) - res = ctypes.cast(symbol, ctypes.c_void_p) - return rffi.cast(rffi.VOIDP, res.value or 0) - def dlerror(): # XXX this would never work on top of ll2ctypes, because # ctypes are calling dlerror itself, unsure if I can do much in this @@ -124,8 +91,6 @@ def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ - if not we_are_translated(): - return t_dlopen(name) if mode == -1: if RTLD_LOCAL is not None: mode = RTLD_LOCAL @@ -139,16 +104,11 @@ raise DLOpenError(err) return res - def dlclose(handle): - if not we_are_translated(): - return t_dlclose(handle) - return c_dlclose(handle) + dlclose = c_dlclose def dlsym(libhandle, name): """ Wrapper around C-level dlsym """ - if not we_are_translated(): - return t_dldym(libhandle, name) res = c_dlsym(libhandle, name) if not res: raise KeyError(name) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py +++ b/rpython/rlib/test/test_rdynload.py @@ -21,4 +21,3 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) - dlclose(lib) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype import 
(GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) @@ -402,6 +403,46 @@ return result @jit.elidable + def ll_strip_default(s, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and s.chars[lpos].isspace(): + lpos += 1 + if right: + while lpos < rpos + 1 and s.chars[rpos].isspace(): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable + def ll_strip_multiple(s, s2, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and LLHelpers.ll_contains(s2, s.chars[lpos]): + lpos += 1 + if right: + while lpos < rpos + 1 and LLHelpers.ll_contains(s2, s.chars[rpos]): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable def ll_upper(s): s_chars = s.chars s_len = len(s_chars) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -231,11 +231,22 @@ def rtype_method_strip(self, hop, left=True, right=True): rstr = hop.args_r[0].repr v_str = hop.inputarg(rstr.repr, arg=0) - v_char = hop.inputarg(rstr.char_repr, arg=1) - v_left = hop.inputconst(Bool, left) - v_right = hop.inputconst(Bool, right) + args_v = [v_str] + if len(hop.args_s) == 2: + if isinstance(hop.args_s[1], annmodel.SomeString): + v_stripstr = hop.inputarg(rstr.repr, arg=1) + args_v.append(v_stripstr) + func = self.ll.ll_strip_multiple + else: + v_char = hop.inputarg(rstr.char_repr, arg=1) + args_v.append(v_char) + func = self.ll.ll_strip + else: + func = self.ll.ll_strip_default + args_v.append(hop.inputconst(Bool, left)) + 
args_v.append(hop.inputconst(Bool, right)) hop.exception_is_here() - return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) + return hop.gendirectcall(func, *args_v) def rtype_method_lstrip(self, hop): return self.rtype_method_strip(hop, left=True, right=False) diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -9,6 +9,7 @@ from rpython.rtyper.rstr import AbstractLLHelpers from rpython.rtyper.rtyper import TyperError from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import llstr, hlstr def test_parse_fmt(): @@ -457,6 +458,29 @@ res = self.interpret(left2, []) assert self.ll_to_string(res) == const('a') + def test_strip_multiple_chars(self): + const = self.const + def both(): + return const('!ab!').strip(const('!a')) + def left(): + return const('!+ab!').lstrip(const('!+')) + def right(): + return const('!ab!+').rstrip(const('!+')) + def empty(): + return const(' \t\t ').strip('\t ') + def left2(): + return const('a ').strip(' \t') + res = self.interpret(both, []) + assert self.ll_to_string(res) == const('b') + res = self.interpret(left, []) + assert self.ll_to_string(res) == const('ab!') + res = self.interpret(right, []) + assert self.ll_to_string(res) == const('!ab') + res = self.interpret(empty, []) + assert self.ll_to_string(res) == const('') + res = self.interpret(left2, []) + assert self.ll_to_string(res) == const('a') + def test_upper(self): const = self.const constchar = self.constchar @@ -1143,3 +1167,16 @@ self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) lltype.free(array, flavor='raw') + + def test_strip_no_arg(self): + strings = [" xyz ", "", "\t\vx"] + + def f(i): + return strings[i].strip() + + res = self.interpret(f, [0]) + assert hlstr(res) == "xyz" + res = self.interpret(f, [1]) + assert hlstr(res) == "" + res = self.interpret(f, [2]) + assert hlstr(res) == "x" diff 
--git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -282,6 +282,7 @@ test_int_valueerror = unsupported test_float = unsupported test_hlstr = unsupported + test_strip_multiple_chars = unsupported def test_hash_via_type(self): from rpython.rlib.objectmodel import compute_hash diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. '.pyc' diff --git a/rpython/translator/c/src/dtoa.c b/rpython/translator/c/src/dtoa.c --- a/rpython/translator/c/src/dtoa.c +++ b/rpython/translator/c/src/dtoa.c @@ -2329,7 +2329,7 @@ static char * __Py_dg_dtoa(double dd, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve) + int *decpt, int *sign, char **rve) { /* Arguments ndigits, decpt, sign are similar to those of ecvt and fcvt; trailing zeros are suppressed from @@ -2952,7 +2952,7 @@ } char * _PyPy_dg_dtoa(double dd, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve) + int *decpt, int *sign, char **rve) { char* result; _PyPy_SET_53BIT_PRECISION_HEADER; diff --git a/rpython/translator/c/src/dtoa.h b/rpython/translator/c/src/dtoa.h --- a/rpython/translator/c/src/dtoa.h +++ b/rpython/translator/c/src/dtoa.h @@ -2,6 +2,6 @@ double _PyPy_dg_strtod(const char *str, char **ptr); char * _PyPy_dg_dtoa(double d, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve); + int *decpt, int *sign, char **rve); void _PyPy_dg_freedtoa(char *s); diff --git 
a/rpython/translator/c/test/test_exception.py b/rpython/translator/c/test/test_exception.py --- a/rpython/translator/c/test/test_exception.py +++ b/rpython/translator/c/test/test_exception.py @@ -156,3 +156,20 @@ assert res == 42 res = f1(0) assert res == 100 + +def test_dict_keyerror_inside_try_finally(): + class CtxMgr: + def __enter__(self): + return 42 + def __exit__(self, *args): + pass + def fn(x): + d = {5: x} + with CtxMgr() as forty_two: + try: + return d[x] + except KeyError: + return forty_two + f1 = getcompiledopt(fn, [int]) + res = f1(100) + assert res == 42 diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -574,6 +574,22 @@ fn = compile(chooser, [bool]) assert fn(True) +def test_ordered_dict(): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please update to Python 2.7") + + expected = [('ea', 1), ('bb', 2), ('c', 3), ('d', 4), ('e', 5), + ('ef', 6)] + d = OrderedDict(expected) + + def f(): + assert d.items() == expected + + fn = compile(f, []) + fn() + def test_inhibit_tail_call(): def foobar_fn(n): return 42 From noreply at buildbot.pypy.org Mon Nov 25 16:21:54 2013 From: noreply at buildbot.pypy.org (krono) Date: Mon, 25 Nov 2013 16:21:54 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in krono/pypy/osx-eci-frameworks-makefile (pull request #195) Message-ID: <20131125152154.DD7791C147D@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r68318:48ee7b6ba963 Date: 2013-11-25 16:21 +0100 http://bitbucket.org/pypy/pypy/changeset/48ee7b6ba963/ Log: Merged in krono/pypy/osx-eci-frameworks-makefile (pull request #195) OSX: Ensure frameworks end up in Makefile when specified in External compilation info diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ 
b/rpython/translator/platform/darwin.py @@ -49,6 +49,18 @@ response_file = relto.bestrelpath(response_file) return ["-Wl,-exported_symbols_list,%s" % (response_file,)] + def gen_makefile(self, cfiles, eci, exe_name=None, path=None, + shared=False): + # ensure frameworks are passed in the Makefile + fs = self._frameworks(eci.frameworks) + if len(fs) > 0: + # concat (-framework, FrameworkName) pairs + self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) + mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, + shared) + return mk + + class Darwin_i386(Darwin): name = "darwin_i386" link_flags = ('-arch', 'i386', '-mmacosx-version-min=10.4') diff --git a/rpython/translator/platform/test/test_darwin.py b/rpython/translator/platform/test/test_darwin.py --- a/rpython/translator/platform/test/test_darwin.py +++ b/rpython/translator/platform/test/test_darwin.py @@ -16,9 +16,14 @@ host_factory = Darwin_i386 else: host_factory = Darwin_x86_64 +elif platform.machine() == 'x86_64': + host_factory = Darwin_x86_64 else: host_factory = Darwin_PowerPC +def is_x86(): + return platform.machine() == 'i386' or platform.machine() == 'x86_64' + class TestDarwin(BasicTest): platform = host_factory() @@ -47,8 +52,39 @@ res = self.platform.execute(executable) self.check_res(res) + def test_frameworks_with_makefile(self): + from StringIO import StringIO + tmpdir = udir.join('fw_mk' + self.__class__.__name__).ensure(dir=1) + objcfile = tmpdir.join('test_simple.m') + objcfile.write(r''' + #import + int main (int argc, const char * argv[]) { + NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; + NSArray *args = [[NSProcessInfo processInfo] arguments]; + NSCountedSet *cset = [[NSCountedSet alloc] initWithArray:args]; + + printf("%d\n", 23); + + [cset release]; + [pool release]; + return 0; + } + ''') + eci = ExternalCompilationInfo(frameworks=('Cocoa',)) + mk = self.platform.gen_makefile([objcfile], eci, path=tmpdir) + # The framework should end up in the 
Makefile + out = StringIO() + mk.write(out) + assert "-framework Cocoa" in out.getvalue() + # check that it actually works + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(tmpdir.join('test_simple')) + self.check_res(res, expected="23\n") + + def test_64_32_results(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") plat32 = Darwin_i386() plat64 = Darwin_x86_64() @@ -72,7 +108,7 @@ self.check_res(res, '1\n') def test_longsize(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") cfile = udir.join('test_int_size.c') cfile.write(r''' @@ -88,9 +124,9 @@ executable = self.platform.compile([cfile], eci) res = self.platform.execute(executable) self.check_res(res, str(sys.maxint) + '\n') - + def test_32bit_makefile(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") plat32 = Darwin_i386() plat64 = Darwin_x86_64() @@ -124,4 +160,3 @@ plat64.execute_makefile(mk) res = plat64.execute(tmpdir.join('test_int_size')) self.check_res(res, '1\n') - From noreply at buildbot.pypy.org Mon Nov 25 16:21:53 2013 From: noreply at buildbot.pypy.org (krono) Date: Mon, 25 Nov 2013 16:21:53 +0100 (CET) Subject: [pypy-commit] pypy osx-eci-frameworks-makefile: OSX: Ensure frameworks end up in Makefile when specified in External compilation info Message-ID: <20131125152153.56B3F1C1473@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: osx-eci-frameworks-makefile Changeset: r68317:f2f414ba68d4 Date: 2013-10-30 14:31 +0100 http://bitbucket.org/pypy/pypy/changeset/f2f414ba68d4/ Log: OSX: Ensure frameworks end up in Makefile when specified in External compilation info diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -49,6 +49,18 @@ response_file = relto.bestrelpath(response_file) return ["-Wl,-exported_symbols_list,%s" % 
(response_file,)] + def gen_makefile(self, cfiles, eci, exe_name=None, path=None, + shared=False): + # ensure frameworks are passed in the Makefile + fs = self._frameworks(eci.frameworks) + if len(fs) > 0: + # concat (-framework, FrameworkName) pairs + self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) + mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, + shared) + return mk + + class Darwin_i386(Darwin): name = "darwin_i386" link_flags = ('-arch', 'i386', '-mmacosx-version-min=10.4') diff --git a/rpython/translator/platform/test/test_darwin.py b/rpython/translator/platform/test/test_darwin.py --- a/rpython/translator/platform/test/test_darwin.py +++ b/rpython/translator/platform/test/test_darwin.py @@ -16,9 +16,14 @@ host_factory = Darwin_i386 else: host_factory = Darwin_x86_64 +elif platform.machine() == 'x86_64': + host_factory = Darwin_x86_64 else: host_factory = Darwin_PowerPC +def is_x86(): + return platform.machine() == 'i386' or platform.machine() == 'x86_64' + class TestDarwin(BasicTest): platform = host_factory() @@ -47,8 +52,39 @@ res = self.platform.execute(executable) self.check_res(res) + def test_frameworks_with_makefile(self): + from StringIO import StringIO + tmpdir = udir.join('fw_mk' + self.__class__.__name__).ensure(dir=1) + objcfile = tmpdir.join('test_simple.m') + objcfile.write(r''' + #import + int main (int argc, const char * argv[]) { + NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; + NSArray *args = [[NSProcessInfo processInfo] arguments]; + NSCountedSet *cset = [[NSCountedSet alloc] initWithArray:args]; + + printf("%d\n", 23); + + [cset release]; + [pool release]; + return 0; + } + ''') + eci = ExternalCompilationInfo(frameworks=('Cocoa',)) + mk = self.platform.gen_makefile([objcfile], eci, path=tmpdir) + # The framework should end up in the Makefile + out = StringIO() + mk.write(out) + assert "-framework Cocoa" in out.getvalue() + # check that it actually works + mk.write() + 
self.platform.execute_makefile(mk) + res = self.platform.execute(tmpdir.join('test_simple')) + self.check_res(res, expected="23\n") + + def test_64_32_results(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") plat32 = Darwin_i386() plat64 = Darwin_x86_64() @@ -72,7 +108,7 @@ self.check_res(res, '1\n') def test_longsize(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") cfile = udir.join('test_int_size.c') cfile.write(r''' @@ -88,9 +124,9 @@ executable = self.platform.compile([cfile], eci) res = self.platform.execute(executable) self.check_res(res, str(sys.maxint) + '\n') - + def test_32bit_makefile(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") plat32 = Darwin_i386() plat64 = Darwin_x86_64() @@ -124,4 +160,3 @@ plat64.execute_makefile(mk) res = plat64.execute(tmpdir.join('test_int_size')) self.check_res(res, '1\n') - From noreply at buildbot.pypy.org Mon Nov 25 20:32:41 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 25 Nov 2013 20:32:41 +0100 (CET) Subject: [pypy-commit] buildbot numpy-tests: replace git's parseGotRevision to add something like a revision number Message-ID: <20131125193241.21B461C0161@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r902:fedbf76c4d87 Date: 2013-11-25 21:32 +0200 http://bitbucket.org/pypy/buildbot/changeset/fedbf76c4d87/ Log: replace git's parseGotRevision to add something like a revision number diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -1,6 +1,6 @@ from buildbot.steps.source.mercurial import Mercurial from buildbot.steps.source.git import Git -from buildbot.process.buildstep import BuildStep +from buildbot.process.buildstep import BuildStep, BuildStepFailed from buildbot.process import factory from buildbot.steps import shell, transfer from buildbot.steps.trigger import Trigger 
@@ -8,6 +8,8 @@ from buildbot import locks from pypybuildbot.util import symlink_force from buildbot.status.results import SKIPPED, SUCCESS +from twisted.python import log +from twisted.internet import defer import os # buildbot supports SlaveLocks, which can be used to limit the amout of builds @@ -187,30 +189,6 @@ # changeset-id for got_revision and final_file_name and sorting the builds # chronologically -class UpdateGitCheckout(ShellCmd): - description = 'git checkout' - command = 'UNKNOWN' - - def __init__(self, workdir=None, haltOnFailure=True, force_branch=None, - **kwargs): - ShellCmd.__init__(self, workdir=workdir, haltOnFailure=haltOnFailure, - **kwargs) - self.force_branch = force_branch - self.addFactoryArguments(force_branch=force_branch) - - def start(self): - if self.force_branch is not None: - branch = self.force_branch - # Note: We could add a warning to the output if we - # ignore the branch set by the user. - else: - properties = self.build.getProperties() - branch = properties['branch'] or 'default' - command = ["git", "checkout", "-f", branch] - self.setCommand(command) - ShellCmd.start(self) - - class CheckGotRevision(ShellCmd): description = 'got_revision' command = ['hg', 'parents', '--template', 'got_revision:{rev}:{node}'] @@ -326,14 +304,23 @@ workdir=workdir, logEnviron=False)) +class PyPyGit(Git): + @defer.inlineCallbacks + def parseGotRevision(self, _=None): + stdout = yield self._dovccmd(['describe', '--tags', 'HEAD'], collectStdout=True) + revision = ':'.join(stdout.strip().split('-')[-2:]) + log.msg("Got Git revision %s" % (revision, )) + self.updateSourceProperty('got_revision', revision) + defer.returnValue(0) + def update_git(platform, factory, repourl, workdir, use_branch, - force_branch=None): - factory.addStep( - Git( + force_branch='HEAD'): + factory.addStep( PyPyGit( repourl=repourl, mode='full', method='fresh', workdir=workdir, + branch=force_branch, logEnviron=False)) def setup_steps(platform, factory, workdir=None, diff 
--git a/bot2/pypybuildbot/test/test_builds.py b/bot2/pypybuildbot/test/test_builds.py --- a/bot2/pypybuildbot/test/test_builds.py +++ b/bot2/pypybuildbot/test/test_builds.py @@ -77,11 +77,11 @@ translateInst = builds.Translate(['-O0'], ['--no-allworkingmodules']) assert translateInst.command[-len(expected):] == expected - + translateFactory = translateInst._getStepFactory().factory args = translateInst._getStepFactory().args rebuiltTranslate = translateFactory(*args) - + assert rebuiltTranslate.command[-len(expected):] == expected rebuiltTranslate.build = FakeBuild() @@ -108,7 +108,7 @@ 'base-latest')) class TestPytestCmd(object): - + class Fake(object): def __init__(self, **kwds): self.__dict__.update(kwds) @@ -154,7 +154,7 @@ summary = builder.summary_by_branch_and_revision[('trunk', '123')] assert summary.to_tuple() == (1, 1, 2, 0) - def test_branch_is_None(self): + def test_branch_is_None(self): step, cmd, builder = self._create(log='', rev='123', branch=None) step.commandComplete(cmd) assert ('trunk', '123') in builder.summary_by_branch_and_revision @@ -163,7 +163,7 @@ step, cmd, builder = self._create(log='', rev='123', branch='branch/foo/') step.commandComplete(cmd) assert ('branch/foo', '123') in builder.summary_by_branch_and_revision - + def test_multiple_logs(self): log = """F a/b.py:test_one . 
a/b.py:test_two From noreply at buildbot.pypy.org Tue Nov 26 01:24:11 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Nov 2013 01:24:11 +0100 (CET) Subject: [pypy-commit] pypy py3k: utilize rpython's new lstrip Message-ID: <20131126002411.991781C144A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68323:219abd35c165 Date: 2013-11-25 15:05 -0800 http://bitbucket.org/pypy/pypy/changeset/219abd35c165/ Log: utilize rpython's new lstrip diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -81,15 +81,9 @@ source, flags = source_as_str(space, w_prog, 'eval', "string, bytes or code", consts.PyCF_SOURCE_IS_UTF8) - # source.lstrip(' \t') - for i, c in enumerate(source): - if c not in ' \t': - if i: - source = source[i:] - break - ec = space.getexecutioncontext() - code = ec.compiler.compile(source, "", 'eval', flags) + code = ec.compiler.compile(source.lstrip(' \t'), "", 'eval', + flags) # XXX: skip adding of __builtins__ to w_globals. 
it requires a # costly gettopframe_nohidden() here and nobody seems to miss its From noreply at buildbot.pypy.org Tue Nov 26 01:24:09 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Nov 2013 01:24:09 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: fix the not really indirect anyway properties Message-ID: <20131126002409.28DD91C1176@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68321:4a8cc991806d Date: 2013-11-25 16:20 -0800 http://bitbucket.org/pypy/pypy/changeset/4a8cc991806d/ Log: fix the not really indirect anyway properties diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -342,6 +342,11 @@ def descr_getnewargs(self, space): return space.newtuple([wrapint(space, space.int_w(self))]) + def descr_repr(self, space): + res = str(self.int_w(space)) + return space.wrap(res) + descr_str = func_with_new_name(descr_repr, 'descr_str') + def descr_conjugate(self, space): "Returns self, the complex conjugate of any int." 
return space.int(self) @@ -364,32 +369,15 @@ val >>= 1 return space.wrap(bits) - """ def descr_get_numerator(self, space): return space.int(self) + descr_get_real = func_with_new_name(descr_get_numerator, 'descr_get_real') def descr_get_denominator(self, space): return space.wrap(1) - def descr_get_real(self, space): - return space.int(self) - def descr_get_imag(self, space): return space.wrap(0) - """ - -# XXX: -def descr_get_numerator(space, w_obj): - return space.int(w_obj) - -def descr_get_denominator(space, w_obj): - return space.wrap(1) - -def descr_get_real(space, w_obj): - return space.int(w_obj) - -def descr_get_imag(space, w_obj): - return space.wrap(0) class W_IntObject(W_AbstractIntObject): @@ -447,11 +435,6 @@ a = self.intval return space.newint(a) - def descr_repr(self, space): - res = str(self.intval) - return space.wrap(res) - descr_str = func_with_new_name(descr_repr, 'descr_str') - def _delegate_Int2Long(space, w_intobj): from pypy.objspace.std.longobject import W_LongObject return W_LongObject.fromint(space, w_intobj.int_w(space)) @@ -624,7 +607,7 @@ # ____________________________________________________________ -W_IntObject.typedef = StdTypeDef("int", +W_AbstractIntObject.typedef = StdTypeDef("int", __doc__ = '''int(x[, base]) -> integer Convert a string or number to an integer, if possible. 
A floating point @@ -635,17 +618,15 @@ will be returned instead.''', __new__ = interp2app(descr__new__), + numerator = typedef.GetSetProperty( + W_AbstractIntObject.descr_get_numerator), + denominator = typedef.GetSetProperty( + W_AbstractIntObject.descr_get_denominator), + real = typedef.GetSetProperty(W_AbstractIntObject.descr_get_real), + imag = typedef.GetSetProperty(W_AbstractIntObject.descr_get_imag), conjugate = interpindirect2app(W_AbstractIntObject.descr_conjugate), bit_length = interpindirect2app(W_AbstractIntObject.descr_bit_length), - # XXX: need a GetSetIndirectProperty - #numerator = typedef.GetSetProperty(W_IntObject.descr_get_numerator), - #denominator = typedef.GetSetProperty(W_IntObject.descr_get_denominator), - #real = typedef.GetSetProperty(W_IntObject.descr_get_real), - #imag = typedef.GetSetProperty(W_IntObject.descr_get_imag), - numerator = typedef.GetSetProperty(descr_get_numerator), - denominator = typedef.GetSetProperty(descr_get_denominator), - real = typedef.GetSetProperty(descr_get_real), - imag = typedef.GetSetProperty(descr_get_imag), + __int__ = interpindirect2app(W_AbstractIntObject.int), __long__ = interpindirect2app(W_AbstractIntObject.descr_long), @@ -695,6 +676,6 @@ __hex__ = interpindirect2app(W_AbstractIntObject.descr_hex), __getnewargs__ = interpindirect2app(W_AbstractIntObject.descr_getnewargs), - __repr__ = interp2app(W_IntObject.descr_repr), - __str__ = interp2app(W_IntObject.descr_str), + __repr__ = interp2app(W_AbstractIntObject.descr_repr), + __str__ = interp2app(W_AbstractIntObject.descr_str), ) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -166,6 +166,16 @@ # XXX: consider stian's branch where he optimizes long + ints return space.newtuple([self, w_other]) + def descr_get_numerator(self, space): + return space.long(self) + descr_get_real = func_with_new_name(descr_get_numerator, 'descr_get_real') + + def 
descr_get_denominator(self, space): + return space.newlong(1) + + def descr_get_imag(self, space): + return space.newlong(0) + class W_LongObject(W_AbstractLongObject): """This is a wrapper of rbigint.""" @@ -242,19 +252,6 @@ def __repr__(self): return '' % self.num.tolong() - # XXX: make these indirect - def descr_get_numerator(self, space): - return space.long(self) - - def descr_get_denominator(self, space): - return space.newlong(1) - - def descr_get_real(self, space): - return space.long(self) - - def descr_get_imag(self, space): - return space.newlong(0) - def descr_long(self, space): # long__Long is supposed to do nothing, unless it has a derived # long object, where it should return an exact one. @@ -620,12 +617,14 @@ string, use the optional base. It is an error to supply a base when converting a non-string.""", __new__ = interp2app(descr__new__), + + numerator = typedef.GetSetProperty( + W_AbstractLongObject.descr_get_numerator), + denominator = typedef.GetSetProperty( + W_AbstractLongObject.descr_get_denominator), + real = typedef.GetSetProperty(W_AbstractLongObject.descr_get_real), + imag = typedef.GetSetProperty(W_AbstractLongObject.descr_get_imag), conjugate = interp2app(W_AbstractLongObject.descr_conjugate), - # XXX: need indirect for these - numerator = typedef.GetSetProperty(W_LongObject.descr_get_numerator), - denominator = typedef.GetSetProperty(W_LongObject.descr_get_denominator), - real = typedef.GetSetProperty(W_LongObject.descr_get_real), - imag = typedef.GetSetProperty(W_LongObject.descr_get_imag), bit_length = interp2app(W_AbstractLongObject.descr_bit_length), # XXX: likely need indirect everything for SmallLong From noreply at buildbot.pypy.org Tue Nov 26 01:24:10 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Nov 2013 01:24:10 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131126002410.77B801C1437@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68322:a0f4678afc88 
Date: 2013-11-20 11:22 -0800 http://bitbucket.org/pypy/pypy/changeset/a0f4678afc88/ Log: merge default diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -460,13 +460,13 @@ check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr): + def method_strip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_lstrip(str, chr): + def method_lstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_rstrip(str, chr): + def method_rstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) def method_join(str, s_list): diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) @@ -402,6 +403,46 @@ return result @jit.elidable + def ll_strip_default(s, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and s.chars[lpos].isspace(): + lpos += 1 + if right: + while lpos < rpos + 1 and s.chars[rpos].isspace(): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable + def ll_strip_multiple(s, s2, left, right): + s_len = len(s.chars) + if s_len == 0: + return s.empty() + lpos = 0 + rpos = s_len - 1 + if left: + while lpos < rpos and LLHelpers.ll_contains(s2, s.chars[lpos]): 
+ lpos += 1 + if right: + while lpos < rpos + 1 and LLHelpers.ll_contains(s2, s.chars[rpos]): + rpos -= 1 + if rpos < lpos: + return s.empty() + r_len = rpos - lpos + 1 + result = s.malloc(r_len) + s.copy_contents(s, result, lpos, 0, r_len) + return result + + @jit.elidable def ll_upper(s): s_chars = s.chars s_len = len(s_chars) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -231,11 +231,22 @@ def rtype_method_strip(self, hop, left=True, right=True): rstr = hop.args_r[0].repr v_str = hop.inputarg(rstr.repr, arg=0) - v_char = hop.inputarg(rstr.char_repr, arg=1) - v_left = hop.inputconst(Bool, left) - v_right = hop.inputconst(Bool, right) + args_v = [v_str] + if len(hop.args_s) == 2: + if isinstance(hop.args_s[1], annmodel.SomeString): + v_stripstr = hop.inputarg(rstr.repr, arg=1) + args_v.append(v_stripstr) + func = self.ll.ll_strip_multiple + else: + v_char = hop.inputarg(rstr.char_repr, arg=1) + args_v.append(v_char) + func = self.ll.ll_strip + else: + func = self.ll.ll_strip_default + args_v.append(hop.inputconst(Bool, left)) + args_v.append(hop.inputconst(Bool, right)) hop.exception_is_here() - return hop.gendirectcall(self.ll.ll_strip, v_str, v_char, v_left, v_right) + return hop.gendirectcall(func, *args_v) def rtype_method_lstrip(self, hop): return self.rtype_method_strip(hop, left=True, right=False) diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -9,6 +9,7 @@ from rpython.rtyper.rstr import AbstractLLHelpers from rpython.rtyper.rtyper import TyperError from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import llstr, hlstr def test_parse_fmt(): @@ -457,6 +458,29 @@ res = self.interpret(left2, []) assert self.ll_to_string(res) == const('a') + def test_strip_multiple_chars(self): + const = self.const + def both(): + return 
const('!ab!').strip(const('!a')) + def left(): + return const('!+ab!').lstrip(const('!+')) + def right(): + return const('!ab!+').rstrip(const('!+')) + def empty(): + return const(' \t\t ').strip('\t ') + def left2(): + return const('a ').strip(' \t') + res = self.interpret(both, []) + assert self.ll_to_string(res) == const('b') + res = self.interpret(left, []) + assert self.ll_to_string(res) == const('ab!') + res = self.interpret(right, []) + assert self.ll_to_string(res) == const('!ab') + res = self.interpret(empty, []) + assert self.ll_to_string(res) == const('') + res = self.interpret(left2, []) + assert self.ll_to_string(res) == const('a') + def test_upper(self): const = self.const constchar = self.constchar @@ -1143,3 +1167,16 @@ self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) lltype.free(array, flavor='raw') + + def test_strip_no_arg(self): + strings = [" xyz ", "", "\t\vx"] + + def f(i): + return strings[i].strip() + + res = self.interpret(f, [0]) + assert hlstr(res) == "xyz" + res = self.interpret(f, [1]) + assert hlstr(res) == "" + res = self.interpret(f, [2]) + assert hlstr(res) == "x" diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -49,7 +49,7 @@ sys.stdout.flush() -if sys.platform != 'win32' and hasattr(os, 'fork'): +if sys.platform != 'win32' and hasattr(os, 'fork') and not os.getenv("PYPY_DONT_RUN_SUBPROCESS", None): # do this at import-time, when the process is still tiny _source = os.path.dirname(os.path.abspath(__file__)) _source = os.path.join(_source, 'runsubprocess.py') # and not e.g. 
'.pyc' From noreply at buildbot.pypy.org Tue Nov 26 01:24:07 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Nov 2013 01:24:07 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20131126002407.C63941C051C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68320:604c5b99a0b0 Date: 2013-11-25 15:08 -0800 http://bitbucket.org/pypy/pypy/changeset/604c5b99a0b0/ Log: merge default diff too long, truncating to 2000 out of 2797 lines diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. 
This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 
'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/release-2.2.0.rst b/pypy/doc/release-2.2.0.rst --- a/pypy/doc/release-2.2.0.rst +++ b/pypy/doc/release-2.2.0.rst @@ -66,9 +66,9 @@ ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; - or by directly doing - ``git clone https://bitbucket.org/pypy/numpy.git``, - ``cd numpy``, ``python setup.py install``. + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. * non-inlined calls have less overhead diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,3 +9,12 @@ .. branch: numpy-newbyteorder Clean up numpy types, add newbyteorder functionality + +.. branch: windows-packaging +Package tk/tcl runtime with win32 + +.. branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI + +.. 
branch: voidtype_strformat +Better support for record numpy arrays diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -73,11 +73,11 @@ https://bitbucket.org/pypy/pypy/downloads/local.zip Then expand it into the base directory (base_dir) and modify your environment to reflect this:: - set PATH=\bin;%PATH% - set INCLUDE=\include;%INCLUDE% - set LIB=\lib;%LIB% + set PATH=\bin;\tcltk\bin;%PATH% + set INCLUDE=\include;\tcltk\include;%INCLUDE% + set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. Read on for more information. The Boehm garbage collector ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -109,11 +109,10 @@ The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download http://bzip.org/1.0.5/bzip2-1.0.5.tar.gz and extract it in -the base directory. Then compile:: - - cd bzip2-1.0.5 + svn export http://svn.python.org/projects/external/bzip2-1.0.6 + cd bzip2-1.0.6 nmake -f makefile.msc + copy bzip.dll \bzip.dll The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -122,8 +121,6 @@ wrapper is compiled when the module is imported for the first time. The sqlite3.dll should be version 3.6.21 for CPython2.7 compatablility. - - The expat XML parser ~~~~~~~~~~~~~~~~~~~~ @@ -146,13 +143,33 @@ use the one distributed by ActiveState, or the one from cygwin. In both case the perl interpreter must be found on the PATH. -Get http://www.openssl.org/source/openssl-0.9.8k.tar.gz and extract it -in the base directory. Then compile:: - + svn export http://svn.python.org/projects/external/openssl-0.9.8y + cd openssl-0.9.8y perl Configure VC-WIN32 ms\do_ms.bat nmake -f ms\nt.mak install +TkInter module support +~~~~~~~~~~~~~~~~~~~~~~ + +Note that much of this is taken from the cpython build process. +Tkinter is imported via cffi, so the module is optional. 
To recreate the tcltk +directory found for the release script, create the dlls, libs, headers and +runtime by running:: + + svn export http://svn.python.org/projects/external/tcl-8.5.2.1 tcl85 + svn export http://svn.python.org/projects/external/tk-8.5.2.0 tk85 + cd tcl85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=0 INSTALLDIR=..\..\tcltk clean all + nmake -f makefile.vc DEBUG=0 INSTALLDIR=..\..\tcltk install + cd ..\..\tk85\win + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 clean all + nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl85 install + +Now you should have a tcktk\bin, tcltk\lib, and tcltk\include directory ready +for use. The release packaging script will pick up the tcltk runtime in the lib +directory and put it in the archive. + Using the mingw compiler ------------------------ diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -942,7 +942,8 @@ self.w_tmp_dir = self.space.wrap(tmp_dir) - foo_py = prefix.join('foo.py').write("pass") + foo_py = prefix.join('foo.py') + foo_py.write("pass") self.w_foo_py = self.space.wrap(str(foo_py)) def test_setup_bootstrap_path(self): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1086,7 +1086,9 @@ assert strlenaddr == cast(BVoidP, strlen) def test_read_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): 
py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1094,7 +1096,9 @@ assert stderr == cast(BVoidP, _testfunc(8)) def test_read_variable_as_unknown_length_array(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1104,7 +1108,9 @@ # ^^ and not 'char[]', which is basically not allowed and would crash def test_write_variable(): - if sys.platform == 'win32' or sys.platform == 'darwin': + ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard + ## https://bugs.pypy.org/issue1643 + if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/binascii/interp_crc32.py b/pypy/module/binascii/interp_crc32.py --- a/pypy/module/binascii/interp_crc32.py +++ b/pypy/module/binascii/interp_crc32.py @@ -1,17 +1,12 @@ from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rarithmetic import r_uint, intmask -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rzipfile import crc_32_tab +from rpython.rlib import rzipfile @unwrap_spec(data='bufferstr', oldcrc='truncatedint_w') def crc32(space, data, oldcrc=0): "Compute the CRC-32 incrementally." 
- crc = r_uint(rffi.cast(rffi.UINT, ~oldcrc)) # signed => 32-bit unsigned - - # in the following loop, we have always 0 <= crc < 2**32 - for c in data: - crc = crc_32_tab[(crc & 0xff) ^ ord(c)] ^ (crc >> 8) - - crc = ~intmask(rffi.cast(rffi.INT, crc)) # unsigned => 32-bit signed - return space.wrap(crc) + crc = rzipfile.crc32(data, r_uint(oldcrc)) + crc = rffi.cast(rffi.INT, crc) # unsigned => 32-bit signed + return space.wrap(intmask(crc)) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -25,6 +25,22 @@ #define Py_UNICODE_SIZE 2 #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) 
*/ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " 
given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -80,7 +80,7 @@ # u = interp_marshal.StringUnmarshaller(space, space.wrap(expected)) w_long = u.load_w_obj() - assert space.eq_w(w_long, w_obj) is True + assert space.eq_w(w_long, w_obj) for sign in [1L, -1L]: for i in range(100): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -80,7 +80,7 @@ return scalar.Scalar(self.dtype, self.getitem(0)) return None - def get_view(self, orig_array, dtype, new_shape): + def get_view(self, space, orig_array, dtype, new_shape): strides, backstrides = support.calc_strides(new_shape, dtype, self.order) return SliceArray(self.start, strides, backstrides, new_shape, @@ -211,7 +211,15 @@ "field named %s not found" % idx)) return RecordChunk(idx) if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): + space.isinstance_w(w_idx, space.w_slice)): + return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) + elif isinstance(w_idx, W_NDimArray) and \ + isinstance(w_idx.implementation, scalar.Scalar): + w_idx = w_idx.get_scalar_value().item(space) + if not space.isinstance_w(w_idx, space.w_int) and \ + not space.isinstance_w(w_idx, space.w_bool): + raise OperationError(space.w_IndexError, space.wrap( + "arrays used as indices must be of integer (or boolean) type")) return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) diff 
--git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -68,9 +68,15 @@ def transpose(self, _): return self - def get_view(self, orig_array, dtype, new_shape): + def get_view(self, space, orig_array, dtype, new_shape): scalar = Scalar(dtype) - scalar.value = self.value.convert_to(dtype) + if dtype.is_str_or_unicode(): + scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) + elif dtype.is_record_type(): + raise OperationError(space.w_NotImplementedError, space.wrap( + "viewing scalar as record not implemented")) + else: + scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) return scalar def get_real(self, orig_array): @@ -127,19 +133,20 @@ if space.len_w(w_idx) == 0: return self.get_scalar_value() raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def getitem_index(self, space, idx): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) def setitem_index(self, space, idx, w_val): raise OperationError(space.w_IndexError, - space.wrap("scalars cannot be indexed")) + space.wrap("0-d arrays can't be indexed")) + def set_shape(self, space, orig_array, new_shape): if not new_shape: return self diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -106,16 +106,26 @@ args_w = [convert_to_array(space, w_arg) for w_arg in args_w] dtype = args_w[0].get_dtype() shape = args_w[0].get_shape()[:] - _axis = axis + ndim = len(shape) + orig_axis 
= axis if axis < 0: - _axis = len(shape) + axis + axis = ndim + axis + if ndim == 1 and axis != 0: + axis = 0 + if axis < 0 or axis >= ndim: + raise operationerrfmt(space.w_IndexError, + "axis %d out of bounds [0, %d)", orig_axis, ndim) for arr in args_w[1:]: + if len(arr.get_shape()) != ndim: + raise OperationError(space.w_ValueError, space.wrap( + "all the input arrays must have same number of dimensions")) for i, axis_size in enumerate(arr.get_shape()): - if len(arr.get_shape()) != len(shape) or (i != _axis and axis_size != shape[i]): + if i == axis: + shape[i] += axis_size + elif axis_size != shape[i]: raise OperationError(space.w_ValueError, space.wrap( - "all the input arrays must have same number of dimensions")) - elif i == _axis: - shape[i] += axis_size + "all the input array dimensions except for the " + "concatenation axis must match exactly")) a_dt = arr.get_dtype() if dtype.is_record_type() and a_dt.is_record_type(): # Record types must match @@ -129,19 +139,17 @@ space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) - if _axis < 0 or len(arr.get_shape()) <= _axis: - raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) # concatenate does not handle ndarray subtypes, it always returns a ndarray res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: - if arr.get_shape()[_axis] == 0: + if arr.get_shape()[axis] == 0: continue - chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, - arr.get_shape()[_axis]) + chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis], 1, + arr.get_shape()[axis]) Chunks(chunks).apply(space, res).implementation.setslice(space, arr) - axis_start += arr.get_shape()[_axis] + axis_start += arr.get_shape()[axis] return res @unwrap_spec(repeats=int) diff --git a/pypy/module/micronumpy/interp_boxes.py 
b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -272,11 +272,19 @@ from pypy.module.micronumpy.interp_dtype import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) if dtype.get_size() != self.get_dtype(space).get_size(): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) - raise OperationError(space.w_NotImplementedError, space.wrap( - "view not implelemnted yet")) + if dtype.is_str_or_unicode(): + return dtype.coerce(space, space.wrap(self.raw_str())) + elif dtype.is_record_type(): + raise OperationError(space.w_NotImplementedError, space.wrap( + "viewing scalar as record not implemented")) + else: + return dtype.itemtype.runpack_str(space, self.raw_str()) def descr_self(self, space): return self @@ -413,6 +421,9 @@ def get_dtype(self, space): return self.arr.dtype + def raw_str(self): + return self.arr.dtype.itemtype.to_str(self) + class W_VoidBox(W_FlexibleBox): def descr_getitem(self, space, w_item): if space.isinstance_w(w_item, space.w_basestring): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -56,6 +56,8 @@ self.aliases = aliases self.float_type = float_type self.fields = fields + if fieldnames is None: + fieldnames = [] self.fieldnames = fieldnames self.shape = list(shape) self.subdtype = subdtype @@ -151,6 +153,14 @@ endian = NPY_NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_descr(self, space): + if not self.is_record_type(): + return space.newlist([space.newtuple([space.wrap(""), + self.descr_get_str(space)])]) + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + "descr not 
implemented for record types")) + def descr_get_base(self, space): return space.wrap(self.base) @@ -206,15 +216,15 @@ self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): - if self.fieldnames is None: + if len(self.fieldnames) == 0: return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) def set_names(self, space, w_names): + self.fieldnames = [] if w_names == space.w_None: - self.fieldnames = None + return else: - self.fieldnames = [] iter = space.iter(w_names) while True: try: @@ -447,6 +457,7 @@ fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False @@ -854,24 +865,21 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itemsize = dtype.get_size() + itembits = dtype.get_size() * 8 items_w = [space.wrap(dtype.char), space.wrap(dtype.num), - space.wrap(itemsize * 8), # in case of changing - # number of bits per byte in the future - space.wrap(itemsize / - (2 if dtype.kind == NPY_COMPLEXLTR else 1) - or 1)] + space.wrap(itembits), + space.wrap(dtype.itemtype.get_element_size())] if dtype.is_int_type(): if dtype.kind == NPY_GENBOOLLTR: w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): - w_maxobj = space.wrap(r_longlong((1 << (itemsize*8 - 1)) + w_maxobj = space.wrap(r_longlong((1 << (itembits - 1)) - 1)) - w_minobj = space.wrap(r_longlong(-1) << (itemsize*8 - 1)) + w_minobj = space.wrap(r_longlong(-1) << (itembits - 1)) else: - w_maxobj = space.wrap(r_ulonglong(1 << (itemsize*8)) - 1) + w_maxobj = space.wrap(r_ulonglong(1 << itembits) - 1) w_minobj = space.wrap(0) items_w = items_w + [w_maxobj, w_minobj] items_w = items_w + [dtype.w_box_type] diff --git 
a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -93,7 +93,11 @@ def descr_fill(self, space, w_value): self.fill(self.get_dtype().coerce(space, w_value)) - def descr_tostring(self, space): + def descr_tostring(self, space, w_order=None): + order = order_converter(space, w_order, NPY_CORDER) + if order == NPY_FORTRANORDER: + raise OperationError(space.w_NotImplementedError, space.wrap( + "unsupported value for order")) return space.wrap(loop.tostring(space, self)) def getitem_filter(self, space, arr): @@ -198,7 +202,8 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: return self.implementation.descr_getitem(space, self, w_idx) @@ -212,7 +217,8 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type(): + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: @@ -243,12 +249,13 @@ return space.wrap(self.dump_data()) return space.call_function(cache.w_array_str, self) - def dump_data(self): + def dump_data(self, prefix='array(', suffix=')'): i = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() - s.append('array([') + s.append(prefix) + s.append('[') while not i.done(): if first: first = False @@ -256,7 +263,8 @@ s.append(', ') s.append(dtype.itemtype.str_format(i.getitem())) i.next() - s.append('])') + s.append(']') + s.append(suffix) return s.build() def create_iter(self, shape=None, backward_broadcast=False, 
require_index=False): @@ -704,7 +712,7 @@ return self return wrap_impl(space, space.type(self), self, self.implementation.get_view( - self, self.get_dtype(), new_shape)) + space, self, self.get_dtype(), new_shape)) def descr_strides(self, space): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -733,11 +741,14 @@ impl = self.implementation new_shape = self.get_shape()[:] dims = len(new_shape) + if new_itemsize == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) if dims == 0: # Cannot resize scalars if old_itemsize != new_itemsize: raise OperationError(space.w_ValueError, space.wrap( - "new type not compatible with array shape")) + "new type not compatible with array.")) else: if dims == 1 or impl.get_strides()[0] < impl.get_strides()[-1]: # Column-major, resize first dimension @@ -751,7 +762,7 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - v = impl.get_view(self, dtype, new_shape) + v = impl.get_view(space, self, dtype, new_shape) w_ret = wrap_impl(space, w_type, self, v) return w_ret diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -61,10 +61,22 @@ def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] - # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), - arr.get_backstrides(), - arr.shape, arr, orig_arr, subdtype) + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, + subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = 
arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides + final_dtype = subdtype + print self.name,'strides',arr.get_strides(),strides + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) class Chunks(BaseChunk): def __init__(self, l): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -490,7 +490,7 @@ if dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: - val = dtype.itemtype.runpack_str(sub) + val = dtype.itemtype.runpack_str(space, sub) ai.setitem(val) ai.next() i += 1 diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py --- a/pypy/module/micronumpy/test/dummy_module.py +++ b/pypy/module/micronumpy/test/dummy_module.py @@ -26,7 +26,7 @@ del types types = ['Generic', 'Number', 'Integer', 'SignedInteger', 'UnsignedInteger', - 'Inexact', 'Floating', 'ComplexFloating', 'Character'] + 'Inexact', 'Floating', 'ComplexFloating', 'Flexible', 'Character'] for t in types: globals()[t.lower()] = typeinfo[t] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -18,6 +18,22 @@ class AppTestDtypes(BaseAppTestDtypes): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def test_typeinfo(self): + import numpy as np + try: + from numpy.core.multiarray import typeinfo + except ImportError: + # running on dummy module + from numpypy import typeinfo + assert typeinfo['Number'] == np.number + assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, + -9223372036854775808L, np.longlong) + assert typeinfo['VOID'] == ('V', 20, 0, 1, np.void) + assert typeinfo['BOOL'] == 
('?', 0, 8, 1, 1, 0, np.bool_) + assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) + assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) + assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) + def test_dtype_basic(self): from numpypy import dtype @@ -816,13 +832,31 @@ assert x.dtype == int8 assert (x == array(42)).all() + def test_descr(self): + import numpy as np + assert np.dtype('= 0 + else: u = unicode_(3) - except NotImplementedError, e: - if e.message.find('not supported yet') >= 0: - skip('unicode box not implemented') - else: assert isinstance(u, unicode) def test_character_dtype(self): @@ -999,35 +1033,15 @@ assert a[0] == 1 assert (a + a)[1] == 4 -class AppTestPyPyOnly(BaseNumpyAppTest): - def setup_class(cls): - if option.runappdirect and '__pypy__' not in sys.builtin_module_names: - py.test.skip("pypy only test") - BaseNumpyAppTest.setup_class.im_func(cls) - - def test_typeinfo(self): - from numpypy import void, number, int64, bool_, complex64, complex128, float16 - try: - from numpy.core.multiarray import typeinfo - except ImportError: - # running on dummy module - from numpypy import typeinfo - assert typeinfo['Number'] == number - assert typeinfo['LONGLONG'] == ('q', 9, 64, 8, 9223372036854775807L, -9223372036854775808L, int64) - assert typeinfo['VOID'] == ('V', 20, 0, 1, void) - assert typeinfo['BOOL'] == ('?', 0, 8, 1, 1, 0, bool_) - assert typeinfo['CFLOAT'] == ('F', 14, 64, 4, complex64) - assert typeinfo['CDOUBLE'] == ('D', 15, 128, 8, complex128) - assert typeinfo['HALF'] == ('e', 23, 16, 2, float16) - class AppTestObjectDtypes(BaseNumpyAppTest): def test_scalar_from_object(self): from numpypy import array + import sys class Polynomial(object): pass - try: + if '__pypy__' in sys.builtin_module_names: + exc = raises(NotImplementedError, array, Polynomial()) + assert exc.value.message.find('unable to create dtype from objects') >= 0 + else: a = array(Polynomial()) assert a.shape == () - except NotImplementedError, e: - 
if e.message.find('unable to create dtype from objects')>=0: - skip('creating ojbect dtype not supported yet') diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -68,7 +68,8 @@ assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] @@ -661,15 +662,17 @@ assert (b[newaxis] == [[2, 3, 4]]).all() def test_scalar(self): - from numpypy import array, dtype, int64 + from numpypy import array, dtype, int_ a = array(3) - raises(IndexError, "a[0]") - raises(IndexError, "a[0] = 5") + exc = raises(IndexError, "a[0]") + assert exc.value[0] == "0-d arrays can't be indexed" + exc = raises(IndexError, "a[0] = 5") + assert exc.value[0] == "0-d arrays can't be indexed" assert a.size == 1 assert a.shape == () assert a.dtype is dtype(int) b = a[()] - assert type(b) is int64 + assert type(b) is int_ assert b == 3 def test_len(self): @@ -1520,7 +1523,36 @@ assert arange(4, dtype='>c8').real.max() == 3.0 assert arange(4, dtype='i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' + a = array([[1, 2], [3, 4]], dtype='i1') + for order in (None, False, 'C', 'K', 'a'): + assert a.tostring(order) == '\x01\x02\x03\x04' + import sys + for order in (True, 'F'): + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, a.tostring, order) + else: + assert a.tostring(order) == '\x01\x03\x02\x04' class AppTestRepr(BaseNumpyAppTest): @@ -2974,7 +3040,8 @@ from numpypy import dtype, array, zeros d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 
3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() @@ -2984,7 +3051,8 @@ assert (a[1][v] == [5.5, 6.5, 7.5, 8.5, 9.5]).all() for v in [-3, 2]: exc = raises(IndexError, "a[0][%d]" % v) - assert exc.value.message == "invalid index (%d)" % (v + 2 if v < 0 else v) + assert exc.value.message == "invalid index (%d)" % \ + (v + 2 if v < 0 else v) exc = raises(IndexError, "a[0]['z']") assert exc.value.message == "invalid index" exc = raises(IndexError, "a[0][None]") @@ -3028,13 +3096,17 @@ ] h = np.array(buf, dtype=descr) assert len(h) == 2 - skip('broken') # XXX - assert np.array_equal(h['x'], np.array([buf[0][0], - buf[1][0]], dtype='i4')) - assert np.array_equal(h['y'], np.array([buf[0][1], - buf[1][1]], dtype='f8')) - assert np.array_equal(h['z'], np.array([buf[0][2], - buf[1][2]], dtype='u1')) + assert h['x'].shape == (2, 2) + assert h['y'].strides == (41, 16, 8) + assert h['z'].shape == (2,) + for v in (h, h[0], h['x']): + repr(v) # check for crash in repr + assert (h['x'] == np.array([buf[0][0], + buf[1][0]], dtype='i4')).all() + assert (h['y'] == np.array([buf[0][1], + buf[1][1]], dtype='f8')).all() + assert (h['z'] == np.array([buf[0][2], + buf[1][2]], dtype='u1')).all() def test_multidim_subarray(self): from numpypy import dtype, array @@ -3052,10 +3124,27 @@ from numpypy import dtype, array d = dtype([("x", "int", 3), ("y", "float", 5)]) - a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), + ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 + def test_3d_record(self): + from numpypy import dtype, array + dt = dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], + [[7, 8, 
9], [10, 11, 12]]])], + dtype=dt) + s = str(a) + i = a.item() + assert isinstance(i, tuple) + assert len(i) == 4 + skip('incorrect formatting via dump_data') + assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " + "[[7, 8, 9], [10, 11, 12]]])]") + + def test_issue_1589(self): import numpypy as numpy c = numpy.array([[(1, 2, 'a'), (3, 4, 'b')], [(5, 6, 'c'), (7, 8, 'd')]], diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -102,6 +102,39 @@ assert b == v raises(IndexError, "v['blah']") + def test_view(self): + import numpy as np + import sys + s = np.dtype('int64').type(12) + exc = raises(ValueError, s.view, 'int8') + assert exc.value[0] == "new type not compatible with array." + t = s.view('double') + assert type(t) is np.double + assert t < 7e-323 + t = s.view('complex64') + assert type(t) is np.complex64 + assert 0 < t.real < 1 + assert t.imag == 0 + exc = raises(TypeError, s.view, 'string') + assert exc.value[0] == "data-type must not be 0-sized" + t = s.view('S8') + assert type(t) is np.string_ + assert t == '\x0c' + s = np.dtype('string').type('abc1') + assert s.view('S4') == 'abc1' + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, s.view, [('a', 'i2'), ('b', 'i2')]) + else: + b = s.view([('a', 'i2'), ('b', 'i2')]) + assert b.shape == () + assert b[0] == 25185 + assert b[1] == 12643 + if '__pypy__' in sys.builtin_module_names: + raises(TypeError, "np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16)") + else: + s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16) + assert s.view('S16') == 'a' * 16 + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ 
b/pypy/module/micronumpy/types.py @@ -197,7 +197,7 @@ for i in xrange(start, stop, width): self._write(storage, i, offset, value) - def runpack_str(self, s): + def runpack_str(self, space, s): v = runpack(self.format_code, s) return self.box(v) @@ -961,7 +961,7 @@ def box(self, value): return self.BoxType(rffi.cast(rffi.DOUBLE, value)) - def runpack_str(self, s): + def runpack_str(self, space, s): assert len(s) == 2 fval = unpack_float(s, native_is_bigendian) return self.box(fval) @@ -1044,6 +1044,13 @@ raw_storage_setitem(storage, i+offset+rffi.sizeof(self.T), imag) + def runpack_str(self, space, s): + comp = self.ComponentBoxType._get_dtype(space).itemtype + l = len(s) // 2 + real = comp.runpack_str(space, s[:l]) + imag = comp.runpack_str(space, s[l:]) + return self.composite(real, imag) + @staticmethod def for_computation(v): return float(v[0]), float(v[1]) @@ -1570,7 +1577,7 @@ T = rffi.LONGDOUBLE BoxType = interp_boxes.W_FloatLongBox - def runpack_str(self, s): + def runpack_str(self, space, s): assert len(s) == interp_boxes.long_double_size fval = unpack_float80(s, native_is_bigendian) return self.box(fval) @@ -1586,10 +1593,24 @@ BoxType = interp_boxes.W_ComplexLongBox ComponentBoxType = interp_boxes.W_FloatLongBox -class BaseStringType(BaseType): +class FlexibleType(BaseType): def get_element_size(self): return rffi.sizeof(self.T) + @jit.unroll_safe + def to_str(self, item): + builder = StringBuilder() + assert isinstance(item, interp_boxes.W_FlexibleBox) + i = item.ofs + end = i + item.dtype.get_size() + while i < end: + assert isinstance(item.arr.storage[i], str) + if item.arr.storage[i] == '\x00': + break + builder.append(item.arr.storage[i]) + i += 1 + return builder.build() + def str_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) @@ -1607,7 +1628,7 @@ ) return dispatcher -class StringType(BaseStringType): +class StringType(FlexibleType): T = lltype.Char @jit.unroll_safe @@ -1623,7 +1644,7 @@ def store(self, arr, i, offset, box): 
assert isinstance(box, interp_boxes.W_StringBox) - size = min(arr.dtype.size, box.arr.size - box.ofs) + size = min(arr.dtype.size - offset, box.arr.size - box.ofs) return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe @@ -1637,20 +1658,6 @@ dtype = arr.dtype return interp_boxes.W_StringBox(arr, i + offset, dtype) - @jit.unroll_safe - def to_str(self, item): - builder = StringBuilder() - assert isinstance(item, interp_boxes.W_StringBox) - i = item.ofs - end = i + item.dtype.get_size() - while i < end: - assert isinstance(item.arr.storage[i], str) - if item.arr.storage[i] == '\x00': - break - builder.append(item.arr.storage[i]) - i += 1 - return builder.build() - def str_format(self, item): builder = StringBuilder() builder.append("'") @@ -1726,7 +1733,7 @@ for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) -class UnicodeType(BaseStringType): +class UnicodeType(FlexibleType): T = lltype.UniChar @jit.unroll_safe @@ -1736,7 +1743,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "coerce (probably from set_item) not implemented for unicode type")) -class VoidType(BaseStringType): +class VoidType(FlexibleType): T = lltype.Char def _coerce(self, space, arr, ofs, dtype, w_items, shape): @@ -1775,17 +1782,50 @@ from pypy.module.micronumpy.base import W_NDimArray if dtype is None: dtype = arr.dtype - strides, backstrides = support.calc_strides(dtype.shape, dtype.subdtype, arr.order) + strides, backstrides = support.calc_strides(dtype.shape, + dtype.subdtype, arr.order) implementation = SliceArray(i + offset, strides, backstrides, - dtype.shape, arr, W_NDimArray(arr), dtype.subdtype) + dtype.shape, arr, W_NDimArray(arr), + dtype.subdtype) return W_NDimArray(implementation) -class RecordType(BaseType): + def read(self, arr, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def str_format(self, box): + assert 
isinstance(box, interp_boxes.W_VoidBox) + arr = self.readarray(box.arr, box.ofs, 0, box.dtype) + return arr.dump_data(prefix='', suffix='') + + def to_builtin_type(self, space, item): + ''' From the documentation of ndarray.item(): + "Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned." + ''' + assert isinstance(item, interp_boxes.W_VoidBox) + dt = item.arr.dtype + ret_unwrapped = [] + for name in dt.fieldnames: + ofs, dtype = dt.fields[name] + if isinstance(dtype.itemtype, VoidType): + read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) + else: + read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) + if isinstance (read_val, interp_boxes.W_StringBox): + # StringType returns a str + read_val = space.wrap(dtype.itemtype.to_str(read_val)) + ret_unwrapped = ret_unwrapped + [read_val,] + if len(ret_unwrapped) == 0: + raise OperationError(space.w_NotImplementedError, space.wrap( + "item() for Void aray with no fields not implemented")) + return space.newtuple(ret_unwrapped) + +class RecordType(FlexibleType): T = lltype.Char - def get_element_size(self): - return rffi.sizeof(self.T) - def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype @@ -1819,6 +1859,17 @@ for k in range(box.arr.dtype.get_size()): arr.storage[k + i] = box.arr.storage[k + box.ofs] + def to_builtin_type(self, space, box): + assert isinstance(box, interp_boxes.W_VoidBox) + items = [] + dtype = box.dtype + for name in dtype.fieldnames: + ofs, subdtype = dtype.fields[name] + itemtype = subdtype.itemtype + subbox = itemtype.read(box.arr, box.ofs, ofs, subdtype) + items.append(itemtype.to_builtin_type(space, subbox)) + return space.newtuple(items) + @jit.unroll_safe def str_format(self, box): assert isinstance(box, interp_boxes.W_VoidBox) @@ -1831,7 +1882,8 @@ first = False else: pieces.append(", ") - pieces.append(tp.str_format(tp.read(box.arr, box.ofs, ofs))) + val = tp.read(box.arr, box.ofs, ofs, subdtype) + 
pieces.append(tp.str_format(val)) pieces.append(")") return "".join(pieces) diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -16,8 +16,13 @@ pass try: + encoding = sys.stderr.encoding + except: + encoding = None + + try: from traceback import print_exception - print_exception(exctype, value, traceback) + print_exception(exctype, value, traceback, _encoding=encoding) except: if not excepthook_failsafe(exctype, value): raise diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -207,6 +207,36 @@ assert err.getvalue() == "ValueError: 42\n" + def test_original_excepthook_pypy_encoding(self): + import sys + if '__pypy__' not in sys.builtin_module_names: + skip("only on PyPy") + savestderr = sys.stderr + class MyStringIO(object): + def __init__(self): + self.output = [] + def write(self, s): + assert isinstance(s, str) + self.output.append(s) + def getvalue(self): + return ''.join(self.output) + + for input, expectedoutput in [(u"\u013a", "\xe5"), + (u"\u1111", "\\u1111")]: + err = MyStringIO() + err.encoding = 'iso-8859-2' + sys.stderr = err + + eh = sys.__excepthook__ + try: + raise ValueError(input) + except ValueError, exc: + eh(*sys.exc_info()) + + sys.stderr = savestderr + print repr(err.getvalue()) + assert err.getvalue().endswith("ValueError: %s\n" % expectedoutput) + # FIXME: testing the code for a lost or replaced excepthook in # Python/pythonrun.c::PyErr_PrintEx() is tricky. 
diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -7,6 +7,9 @@ if os.name != 'posix': py.test.skip('termios module only available on unix') +if sys.platform.startswith('freebsd'): + raise Exception('XXX seems to hangs on FreeBSD9') + class TestTermios(object): def setup_class(cls): try: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1638,7 +1638,11 @@ #include #define alloca _alloca #else - #include + # ifdef __FreeBSD__ + # include + # else + # include + # endif #endif static int (*python_callback)(int how_many, int *values); static int c_callback(int how_many, ...) { diff --git a/pypy/module/test_lib_pypy/pyrepl/__init__.py b/pypy/module/test_lib_pypy/pyrepl/__init__.py --- a/pypy/module/test_lib_pypy/pyrepl/__init__.py +++ b/pypy/module/test_lib_pypy/pyrepl/__init__.py @@ -1,3 +1,6 @@ import sys import lib_pypy.pyrepl sys.modules['pyrepl'] = sys.modules['lib_pypy.pyrepl'] + +if sys.platform.startswith('freebsd'): + raise Exception('XXX seems to hangs on FreeBSD9') diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -16,7 +16,7 @@ except KeyError: continue assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] + assert 'root' in g.gr_mem or g.gr_mem == [] assert g.gr_name == name assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) break diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,18 +67,22 @@ raise PyPyCNotFound( 'Bogus path: %r does not exist 
(see docstring for more info)' % (os.path.dirname(str(pypy_c)),)) + win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', + 'libeay32.dll', 'ssleay32.dll'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - if not withouttk: - try: - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) - except subprocess.CalledProcessError: - print >>sys.stderr, """Building Tk bindings failed. + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. You can either install Tk development headers package or add --without-tk option to skip packaging binary CFFI extension.""" - sys.exit(1) + sys.exit(1) + #Can the dependencies be found from cffi somehow? + win_extras += ['tcl85.dll', 'tk85.dll'] if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -101,9 +105,7 @@ # Can't rename a DLL: it is always called 'libpypy-c.dll' - for extra in ['libpypy-c.dll', - 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll']: + for extra in win_extras: p = pypy_c.dirpath().join(extra) if not p.check(): p = py.path.local.sysfind(extra) @@ -122,6 +124,19 @@ # XXX users will complain that they cannot compile cpyext # modules for windows, has the lib moved or are there no # exported functions in the dll so no import library is created? + if not withouttk: + try: + p = pypy_c.dirpath().join('tcl85.dll') + if not p.check(): + p = py.path.local.sysfind('tcl85.dll') + tktcldir = p.dirpath().join('..').join('lib') + shutil.copytree(str(tktcldir), str(pypydir.join('tcl'))) + except WindowsError: + print >>sys.stderr, """Packaging Tk runtime failed. 
+tk85.dll and tcl85.dll found, expecting to find runtime in ..\\lib +directory next to the dlls, as per build instructions.""" + import traceback;traceback.print_exc() + sys.exit(1) # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. @@ -217,5 +232,11 @@ else: print_usage() + if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): + kw['nostrip'] = True + + if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): + kw['withouttk'] = True + args = args[i:] package(*args, **kw) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -371,15 +371,19 @@ listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: + if tp is SomeOrderedDict.knowntype: + cls = SomeOrderedDict + else: + cls = SomeDict if need_const: key = Constant(x) try: return self.immutable_cache[key] except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) @@ -412,10 +416,7 @@ dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is SomeOrderedDict.knowntype: - result = SomeOrderedDict(dictdef) - else: - result = SomeDict(dictdef) + result = cls(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3463,7 +3463,7 @@ py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") 
a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) - + def test_setslice(self): def f(): @@ -4140,6 +4140,27 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_str_format_error(self): + def f(s, x): + return s.format(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str, str]) + assert ("format() is not RPython" in exc.value.msg) + + def test_prebuilt_ordered_dict(self): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please upgrade to python 2.7") + d = OrderedDict([("aa", 1)]) + + def f(): + return d + + a = self.RPythonAnnotator() + assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -460,13 +460,13 @@ check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr): + def method_strip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_lstrip(str, chr): + def method_lstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) - def method_rstrip(str, chr): + def method_rstrip(str, chr=None): return str.basestringclass(no_nul=str.no_nul) def method_join(str, s_list): @@ -523,6 +523,9 @@ return SomeObject.op_contains(str, s_element) op_contains.can_only_throw = [] + def method_format(self, *args): + raise AnnotatorError("Method format() is not RPython") + class __extend__(SomeByteArray): def getslice(ba, s_start, s_stop): diff --git a/rpython/conftest.py b/rpython/conftest.py --- a/rpython/conftest.py +++ b/rpython/conftest.py @@ -24,20 +24,20 @@ def pytest_configure(config): global option option = config.option - -def _set_platform(opt, opt_str, value, parser): from rpython.config.translationoption import PLATFORMS from 
rpython.translator.platform import set_platform - if value not in PLATFORMS: - raise ValueError("%s not in %s" % (value, PLATFORMS)) - set_platform(value, None) + platform = config.option.platform + if platform not in PLATFORMS: + raise ValueError("%s not in %s" % (platform, PLATFORMS)) + set_platform(platform, None) + def pytest_addoption(parser): group = parser.getgroup("rpython options") group.addoption('--view', action="store_true", dest="view", default=False, help="view translation tests' flow graphs with Pygame") - group.addoption('-P', '--platform', action="callback", type="string", - default="host", callback=_set_platform, + group.addoption('-P', '--platform', action="store", dest="platform", + type="string", default="host", help="set up tests to use specified platform as compile/run target") group = parser.getgroup("JIT options") group.addoption('--viewloops', action="store_true", @@ -60,7 +60,7 @@ class LeakFinder: """Track memory allocations during test execution. - + So far, only used by the function lltype.malloc(flavor='raw'). 
""" def pytest_runtest_setup(self, __multicall__, item): diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -227,20 +227,81 @@ class HardFloatCallBuilder(ARMCallbuilder): + next_arg_vfp = 0 + next_arg_svfp = 0 + + def get_next_vfp(self, tp): + assert tp in 'fS' + if self.next_arg_vfp == -1: + return None + if tp == 'S': + i = self.next_arg_svfp + next_vfp = (i >> 1) + 1 + if not (i + 1) & 1: # i is even + self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) + self.next_arg_svfp = self.next_arg_vfp << 1 + else: + self.next_arg_svfp += 1 + self.next_arg_vfp = next_vfp + lst = r.svfp_argument_regs + else: # 64bit double + i = self.next_arg_vfp + self.next_arg_vfp += 1 + if self.next_arg_svfp >> 1 == i: + self.next_arg_svfp = self.next_arg_vfp << 1 + lst = r.vfp_argument_regs + try: + return lst[i] + except IndexError: + self.next_arg_vfp = self.next_arg_svfp = -1 + return None + def prepare_arguments(self): From noreply at buildbot.pypy.org Tue Nov 26 01:24:13 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Nov 2013 01:24:13 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20131126002413.6173A1C051C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68324:0e2516a31328 Date: 2013-11-25 15:08 -0800 http://bitbucket.org/pypy/pypy/changeset/0e2516a31328/ Log: merge default diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -67,3 +67,6 @@ except KeyboardInterrupt: 
console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,6 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -25,6 +25,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -470,6 +471,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -29,6 +29,22 @@ #define VA_LIST_IS_ARRAY #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) 
*/ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -288,10 +288,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " 
given)")) @@ -299,10 +297,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -310,10 +306,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -321,6 +315,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -277,6 +277,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -56,6 +56,8 @@ self.aliases = aliases self.float_type = float_type self.fields = fields + if fieldnames is None: + fieldnames = [] self.fieldnames = fieldnames self.shape = list(shape) self.subdtype = subdtype @@ -214,15 +216,15 @@ self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): - if self.fieldnames is None: + if len(self.fieldnames) == 0: return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) def set_names(self, space, w_names): + self.fieldnames = [] if w_names == space.w_None: - self.fieldnames = None + return else: - self.fieldnames = [] iter = space.iter(w_names) while True: try: diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -249,12 +249,13 @@ return space.wrap(self.dump_data()) return space.call_function(cache.w_array_str, self) - def dump_data(self): + def dump_data(self, prefix='array(', suffix=')'): i = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() - s.append('array([') + s.append(prefix) + s.append('[') while not i.done(): if first: first = False @@ -262,7 +263,8 @@ s.append(', ') s.append(dtype.itemtype.str_format(i.getitem())) i.next() - s.append('])') + s.append(']') + s.append(suffix) return s.build() def create_iter(self, shape=None, backward_broadcast=False, require_index=False): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ 
b/pypy/module/micronumpy/iter.py @@ -61,10 +61,22 @@ def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] - # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), - arr.get_backstrides(), - arr.shape, arr, orig_arr, subdtype) + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, + subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides + final_dtype = subdtype + print self.name,'strides',arr.get_strides(),strides + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) class Chunks(BaseChunk): def __init__(self, l): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3088,7 +3088,9 @@ ] h = np.array(buf, dtype=descr) assert len(h) == 2 - skip('broken') # XXX + assert h['x'].shape == (2, 2) + assert h['y'].strides == (41, 16, 8) + assert h['z'].shape == (2,) for v in (h, h[0], h['x']): repr(v) # check for crash in repr assert (h['x'] == np.array([buf[0][0], @@ -3119,6 +3121,22 @@ assert len(list(a[0])) == 2 + def test_3d_record(self): + from numpypy import dtype, array + dt = dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + s = str(a) + i = a.item() + assert isinstance(i, tuple) + assert len(i) == 4 + 
skip('incorrect formatting via dump_data') + assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " + "[[7, 8, 9], [10, 11, 12]]])]") + + def test_issue_1589(self): import numpypy as numpy c = numpy.array([[(1, 2, 'a'), (3, 4, 'b')], [(5, 6, 'c'), (7, 8, 'd')]], diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1789,6 +1789,40 @@ dtype.subdtype) return W_NDimArray(implementation) + def read(self, arr, i, offset, dtype=None): + if dtype is None: + dtype = arr.dtype + return interp_boxes.W_VoidBox(arr, i + offset, dtype) + + @jit.unroll_safe + def str_format(self, box): + assert isinstance(box, interp_boxes.W_VoidBox) + arr = self.readarray(box.arr, box.ofs, 0, box.dtype) + return arr.dump_data(prefix='', suffix='') + + def to_builtin_type(self, space, item): + ''' From the documentation of ndarray.item(): + "Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned." 
+ ''' + assert isinstance(item, interp_boxes.W_VoidBox) + dt = item.arr.dtype + ret_unwrapped = [] + for name in dt.fieldnames: + ofs, dtype = dt.fields[name] + if isinstance(dtype.itemtype, VoidType): + read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) + else: + read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) + if isinstance (read_val, interp_boxes.W_StringBox): + # StringType returns a str + read_val = space.wrap(dtype.itemtype.to_str(read_val)) + ret_unwrapped = ret_unwrapped + [read_val,] + if len(ret_unwrapped) == 0: + raise OperationError(space.w_NotImplementedError, space.wrap( + "item() for Void aray with no fields not implemented")) + return space.newtuple(ret_unwrapped) + class RecordType(FlexibleType): T = lltype.Char @@ -1848,7 +1882,8 @@ first = False else: pieces.append(", ") - pieces.append(tp.str_format(tp.read(box.arr, box.ofs, ofs))) + val = tp.read(box.arr, box.ofs, ofs, subdtype) + pieces.append(tp.str_format(val)) pieces.append(")") return "".join(pieces) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -371,15 +371,19 @@ listdef.generalize(self.immutablevalue(e, False)) result = SomeList(listdef) elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: + if tp is SomeOrderedDict.knowntype: + cls = SomeOrderedDict + else: + cls = SomeDict if need_const: key = Constant(x) try: return self.immutable_cache[key] except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) @@ -412,10 +416,7 @@ dictdef.generalize_key(self.immutablevalue(ek, False)) dictdef.generalize_value(self.immutablevalue(ev, False)) dictdef.seen_prebuilt_key(ek) - if tp is 
SomeOrderedDict.knowntype: - result = SomeOrderedDict(dictdef) - else: - result = SomeDict(dictdef) + result = cls(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4148,6 +4148,19 @@ a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) + def test_prebuilt_ordered_dict(self): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please upgrade to python 2.7") + d = OrderedDict([("aa", 1)]) + + def f(): + return d + + a = self.RPythonAnnotator() + assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def g(n): return [0, 1, 2, n] diff --git a/rpython/jit/backend/x86/test/test_ztranslation_basic.py b/rpython/jit/backend/x86/test/test_ztranslation_basic.py --- a/rpython/jit/backend/x86/test/test_ztranslation_basic.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_basic.py @@ -1,11 +1,11 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationX86(TranslationTest): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass - # needs to be changed to CPU386_NO_SSE2, but well. 
- if WORD == 4: + # msse2 and sse are always on on x86-64 + if WORD == 4 and sys.platform != 'win32': assert '-msse2' in cbuilder.eci.compile_extra assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py --- a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py @@ -1,11 +1,13 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC - +from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationCallAssemblerX86(TranslationTestCallAssembler): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass + #We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. 
- assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra \ No newline at end of file + if WORD == 4 and sys.platform != 'win32': + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py b/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py --- a/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_jit_stats.py @@ -1,11 +1,14 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestJITStats from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC +from rpython.jit.backend.x86.arch import WORD +import sys class TestTranslationJITStatsX86(TranslationTestJITStats): def _check_cbuilder(self, cbuilder): - # We assume here that we have sse2. If not, the CPUClass + #We assume here that we have sse2. If not, the CPUClass # needs to be changed to CPU386_NO_SSE2, but well. 
- assert '-msse2' in cbuilder.eci.compile_extra - assert '-mfpmath=sse' in cbuilder.eci.compile_extra \ No newline at end of file + if WORD == 4 and sys.platform != 'win32': + assert '-msse2' in cbuilder.eci.compile_extra + assert '-mfpmath=sse' in cbuilder.eci.compile_extra diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py --- a/rpython/rlib/rdtoa.py +++ b/rpython/rlib/rdtoa.py @@ -38,6 +38,10 @@ ], ) +# dtoa.c is limited to 'int', so we refuse to pass it +# strings or integer arguments bigger than ~2GB +_INT_LIMIT = 0x7ffff000 + dg_strtod = rffi.llexternal( '_PyPy_dg_strtod', [rffi.CCHARP, rffi.CCHARPP], rffi.DOUBLE, compilation_info=eci, sandboxsafe=True) @@ -52,6 +56,8 @@ compilation_info=eci, sandboxsafe=True) def strtod(input): + if len(input) > _INT_LIMIT: + raise MemoryError end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') try: ll_input = rffi.str2charp(input) @@ -232,6 +238,8 @@ def dtoa(value, code='r', mode=0, precision=0, flags=0, special_strings=lower_special_strings, upper=False): + if precision > _INT_LIMIT: + raise MemoryError decpt_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: sign_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -282,6 +282,7 @@ test_int_valueerror = unsupported test_float = unsupported test_hlstr = unsupported + test_strip_multiple_chars = unsupported def test_hash_via_type(self): from rpython.rlib.objectmodel import compute_hash diff --git a/rpython/translator/c/src/dtoa.c b/rpython/translator/c/src/dtoa.c --- a/rpython/translator/c/src/dtoa.c +++ b/rpython/translator/c/src/dtoa.c @@ -2329,7 +2329,7 @@ static char * __Py_dg_dtoa(double dd, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve) + int *decpt, int *sign, char **rve) { /* Arguments ndigits, decpt, sign are similar to those of ecvt and fcvt; trailing 
zeros are suppressed from @@ -2952,7 +2952,7 @@ } char * _PyPy_dg_dtoa(double dd, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve) + int *decpt, int *sign, char **rve) { char* result; _PyPy_SET_53BIT_PRECISION_HEADER; diff --git a/rpython/translator/c/src/dtoa.h b/rpython/translator/c/src/dtoa.h --- a/rpython/translator/c/src/dtoa.h +++ b/rpython/translator/c/src/dtoa.h @@ -2,6 +2,6 @@ double _PyPy_dg_strtod(const char *str, char **ptr); char * _PyPy_dg_dtoa(double d, int mode, int ndigits, - Signed *decpt, Signed *sign, char **rve); + int *decpt, int *sign, char **rve); void _PyPy_dg_freedtoa(char *s); diff --git a/rpython/translator/c/test/test_exception.py b/rpython/translator/c/test/test_exception.py --- a/rpython/translator/c/test/test_exception.py +++ b/rpython/translator/c/test/test_exception.py @@ -156,3 +156,20 @@ assert res == 42 res = f1(0) assert res == 100 + +def test_dict_keyerror_inside_try_finally(): + class CtxMgr: + def __enter__(self): + return 42 + def __exit__(self, *args): + pass + def fn(x): + d = {5: x} + with CtxMgr() as forty_two: + try: + return d[x] + except KeyError: + return forty_two + f1 = getcompiledopt(fn, [int]) + res = f1(100) + assert res == 42 diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -574,6 +574,22 @@ fn = compile(chooser, [bool]) assert fn(True) +def test_ordered_dict(): + try: + from collections import OrderedDict + except ImportError: + py.test.skip("Please update to Python 2.7") + + expected = [('ea', 1), ('bb', 2), ('c', 3), ('d', 4), ('e', 5), + ('ef', 6)] + d = OrderedDict(expected) + + def f(): + assert d.items() == expected + + fn = compile(f, []) + fn() + def test_inhibit_tail_call(): def foobar_fn(n): return 42 diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ 
b/rpython/translator/platform/darwin.py @@ -49,6 +49,18 @@ response_file = relto.bestrelpath(response_file) return ["-Wl,-exported_symbols_list,%s" % (response_file,)] + def gen_makefile(self, cfiles, eci, exe_name=None, path=None, + shared=False): + # ensure frameworks are passed in the Makefile + fs = self._frameworks(eci.frameworks) + if len(fs) > 0: + # concat (-framework, FrameworkName) pairs + self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) + mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, + shared) + return mk + + class Darwin_i386(Darwin): name = "darwin_i386" link_flags = ('-arch', 'i386', '-mmacosx-version-min=10.4') diff --git a/rpython/translator/platform/test/test_darwin.py b/rpython/translator/platform/test/test_darwin.py --- a/rpython/translator/platform/test/test_darwin.py +++ b/rpython/translator/platform/test/test_darwin.py @@ -16,9 +16,14 @@ host_factory = Darwin_i386 else: host_factory = Darwin_x86_64 +elif platform.machine() == 'x86_64': + host_factory = Darwin_x86_64 else: host_factory = Darwin_PowerPC +def is_x86(): + return platform.machine() == 'i386' or platform.machine() == 'x86_64' + class TestDarwin(BasicTest): platform = host_factory() @@ -47,8 +52,39 @@ res = self.platform.execute(executable) self.check_res(res) + def test_frameworks_with_makefile(self): + from StringIO import StringIO + tmpdir = udir.join('fw_mk' + self.__class__.__name__).ensure(dir=1) + objcfile = tmpdir.join('test_simple.m') + objcfile.write(r''' + #import + int main (int argc, const char * argv[]) { + NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init]; + NSArray *args = [[NSProcessInfo processInfo] arguments]; + NSCountedSet *cset = [[NSCountedSet alloc] initWithArray:args]; + + printf("%d\n", 23); + + [cset release]; + [pool release]; + return 0; + } + ''') + eci = ExternalCompilationInfo(frameworks=('Cocoa',)) + mk = self.platform.gen_makefile([objcfile], eci, path=tmpdir) + # The framework should end up in the 
Makefile + out = StringIO() + mk.write(out) + assert "-framework Cocoa" in out.getvalue() + # check that it actually works + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(tmpdir.join('test_simple')) + self.check_res(res, expected="23\n") + + def test_64_32_results(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") plat32 = Darwin_i386() plat64 = Darwin_x86_64() @@ -72,7 +108,7 @@ self.check_res(res, '1\n') def test_longsize(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") cfile = udir.join('test_int_size.c') cfile.write(r''' @@ -88,9 +124,9 @@ executable = self.platform.compile([cfile], eci) res = self.platform.execute(executable) self.check_res(res, str(sys.maxint) + '\n') - + def test_32bit_makefile(self): - if platform.machine() != 'i386': + if not is_x86(): py.test.skip("i386 only") plat32 = Darwin_i386() plat64 = Darwin_x86_64() @@ -124,4 +160,3 @@ plat64.execute_makefile(mk) res = plat64.execute(tmpdir.join('test_int_size')) self.check_res(res, '1\n') - From noreply at buildbot.pypy.org Tue Nov 26 17:05:25 2013 From: noreply at buildbot.pypy.org (ltratt) Date: Tue, 26 Nov 2013 17:05:25 +0100 (CET) Subject: [pypy-commit] pypy more_strategies: Remove some of the unlikely special cases. Message-ID: <20131126160525.DE23A1C051C@cobra.cs.uni-duesseldorf.de> Author: Laurence Tratt Branch: more_strategies Changeset: r68325:422d7a1ecc4d Date: 2013-11-26 15:45 +0000 http://bitbucket.org/pypy/pypy/changeset/422d7a1ecc4d/ Log: Remove some of the unlikely special cases. These don't do any harm, but they are unlikely to trigger very often. By common consensus, they're probably better off removed. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1584,9 +1584,6 @@ # list. 
raise ValueError return self._safe_find(w_list, intv, start, stop) - elif w_objt is W_StringObject or w_objt is W_UnicodeObject \ - or self.space.type(w_obj).compares_by_identity(): - raise ValueError return ListStrategy.find(self, w_list, w_obj, start, stop) @@ -1639,9 +1636,6 @@ return self._safe_find(w_list, self.unwrap(w_obj), start, stop) elif w_objt is W_IntObject or w_objt is W_LongObject: return self._safe_find(w_list, w_obj.float_w(self.space), start, stop) - elif w_objt is W_StringObject or w_objt is W_UnicodeObject \ - or self.space.type(w_obj).compares_by_identity(): - raise ValueError return ListStrategy.find(self, w_list, w_obj, start, stop) def sort(self, w_list, reverse): From noreply at buildbot.pypy.org Wed Nov 27 09:39:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 09:39:00 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: Merged in eliben/cffi/eliben/fix-link-to-pycparser-pointing-to-its-ne-1385473489984 (pull request #22) Message-ID: <20131127083900.B739A1C154F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1431:00210fd3f65d Date: 2013-11-27 09:38 +0100 http://bitbucket.org/cffi/cffi/changeset/00210fd3f65d/ Log: Merged in eliben/cffi/eliben/fix-link-to-pycparser-pointing-to-its- ne-1385473489984 (pull request #22) Fix link to pycparser pointing to its new home diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -74,7 +74,7 @@ ``python-dev`` and ``libffi-dev`` (for Windows, libffi is included with CFFI). -* pycparser >= 2.06: http://code.google.com/p/pycparser/ +* pycparser >= 2.06: https://github.com/eliben/pycparser * a C compiler is required to use CFFI during development, but not to run correctly-installed programs that use CFFI. 
From noreply at buildbot.pypy.org Wed Nov 27 09:38:59 2013 From: noreply at buildbot.pypy.org (eliben) Date: Wed, 27 Nov 2013 09:38:59 +0100 (CET) Subject: [pypy-commit] cffi eliben/fix-link-to-pycparser-pointing-to-its-ne-1385473489984: Fix link to pycparser pointing to its new home Message-ID: <20131127083859.958FA1C150C@cobra.cs.uni-duesseldorf.de> Author: Eli Bendersky Branch: eliben/fix-link-to-pycparser-pointing-to-its-ne-1385473489984 Changeset: r1430:744ba8a30a4a Date: 2013-11-26 13:44 +0000 http://bitbucket.org/cffi/cffi/changeset/744ba8a30a4a/ Log: Fix link to pycparser pointing to its new home diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -74,7 +74,7 @@ ``python-dev`` and ``libffi-dev`` (for Windows, libffi is included with CFFI). -* pycparser >= 2.06: http://code.google.com/p/pycparser/ +* pycparser >= 2.06: https://github.com/eliben/pycparser * a C compiler is required to use CFFI during development, but not to run correctly-installed programs that use CFFI. From noreply at buildbot.pypy.org Wed Nov 27 09:40:02 2013 From: noreply at buildbot.pypy.org (eliben) Date: Wed, 27 Nov 2013 09:40:02 +0100 (CET) Subject: [pypy-commit] cffi default: Fix link to pycparser pointing to its new home Message-ID: <20131127084002.57B0F1C010B@cobra.cs.uni-duesseldorf.de> Author: Eli Bendersky Branch: Changeset: r1432:c0debe14a6ef Date: 2013-11-26 13:44 +0000 http://bitbucket.org/cffi/cffi/changeset/c0debe14a6ef/ Log: Fix link to pycparser pointing to its new home diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -74,7 +74,7 @@ ``python-dev`` and ``libffi-dev`` (for Windows, libffi is included with CFFI). -* pycparser >= 2.06: http://code.google.com/p/pycparser/ +* pycparser >= 2.06: https://github.com/eliben/pycparser * a C compiler is required to use CFFI during development, but not to run correctly-installed programs that use CFFI. 
From noreply at buildbot.pypy.org Wed Nov 27 11:26:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 11:26:45 +0100 (CET) Subject: [pypy-commit] pypy default: Detail Message-ID: <20131127102645.4F2991C0205@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68326:236121491fc3 Date: 2013-11-27 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/236121491fc3/ Log: Detail diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? From noreply at buildbot.pypy.org Wed Nov 27 14:41:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 14:41:34 +0100 (CET) Subject: [pypy-commit] pypy default: Update the version number Message-ID: <20131127134134.0CF5A1C010B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68327:ea44cc416678 Date: 2013-11-27 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/ea44cc416678/ Log: Update the version number diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. 
-* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html From noreply at buildbot.pypy.org Wed Nov 27 14:41:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 14:41:35 +0100 (CET) Subject: [pypy-commit] pypy default: Import cffi/c0debe14a6ef Message-ID: <20131127134135.40FFA1C0205@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68328:4730a8f54c85 Date: 2013-11-27 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/4730a8f54c85/ Log: Import cffi/c0debe14a6ef diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -10,6 +10,7 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change + '0.8.1': '0.8', # did not change } def test_version(): From noreply at buildbot.pypy.org Wed Nov 27 14:41:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 14:41:36 +0100 (CET) Subject: [pypy-commit] pypy default: Release announcement Message-ID: <20131127134136.5DCF81C010B@cobra.cs.uni-duesseldorf.de> 
Author: Armin Rigo Branch: Changeset: r68329:52d5e6298a41 Date: 2013-11-27 13:25 +0100 http://bitbucket.org/pypy/pypy/changeset/52d5e6298a41/ Log: Release announcement diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. 
+ + +Cheers, +Armin Rigo & everybody From noreply at buildbot.pypy.org Wed Nov 27 14:41:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 14:41:37 +0100 (CET) Subject: [pypy-commit] pypy default: Add an assert against misuse of the 3rd argument Message-ID: <20131127134137.81D731C010B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68330:d7d658cf840b Date: 2013-11-27 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/d7d658cf840b/ Log: Add an assert against misuse of the 3rd argument diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -48,6 +48,7 @@ def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', copy_to_dir=None, override_pypy_c=None, nostrip=False, withouttk=False): + assert '/' not in rename_pypy_c basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' From noreply at buildbot.pypy.org Wed Nov 27 18:05:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 18:05:53 +0100 (CET) Subject: [pypy-commit] pypy default: Rewrite most of "Writing extension modules for pypy". Message-ID: <20131127170553.62FC41C3282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68331:67e88237933d Date: 2013-11-27 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/67e88237933d/ Log: Rewrite most of "Writing extension modules for pypy". diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. 
+Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. 
PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. 
Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. - -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. 
It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. From noreply at buildbot.pypy.org Wed Nov 27 18:06:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 18:06:38 +0100 (CET) Subject: [pypy-commit] pypy release-2.2.x: Added tag release-2.2.1 for changeset 87aa9de10f9c Message-ID: <20131127170638.DC1EA1C3282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.2.x Changeset: r68332:e851cf72bbdb Date: 2013-11-27 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/e851cf72bbdb/ Log: Added tag release-2.2.1 for changeset 87aa9de10f9c diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -7,3 +7,4 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm 8eb5b5ac4bba366e7c7519c186bdfcf9c28c075d release-2.2.0 +87aa9de10f9ca71da9ab4a3d53e0ba176b67d086 release-2.2.1 From noreply at buildbot.pypy.org Wed Nov 27 18:17:14 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 27 Nov 2013 18:17:14 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: avoid direct modification of opname Message-ID: <20131127171714.427F11C02AE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: 
r68333:ecad647dab5d Date: 2013-11-27 14:35 +0000 http://bitbucket.org/pypy/pypy/changeset/ecad647dab5d/ Log: avoid direct modification of opname diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -1,6 +1,7 @@ import types from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.annotator import description, model as annmodel from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Void @@ -401,7 +402,8 @@ r_method = self.rtyper.getrepr(s_attr) r_method.get_method_from_instance(self, vinst, hop.llops) hop2 = hop.copy() - hop2.spaceop.opname = 'simple_call' + hop2.spaceop = op.simple_call(hop.spaceop.args[0]) + hop2.spaceop.result = hop.spaceop.result hop2.args_r = [r_method] hop2.args_s = [s_attr] return hop2.dispatch() From noreply at buildbot.pypy.org Wed Nov 27 18:19:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Nov 2013 18:19:16 +0100 (CET) Subject: [pypy-commit] pypy default: Kill unused line Message-ID: <20131127171916.48E651C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68334:08de9f5945f5 Date: 2013-11-27 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/08de9f5945f5/ Log: Kill unused line diff --git a/pypy/module/posix/app_startfile.py b/pypy/module/posix/app_startfile.py --- a/pypy/module/posix/app_startfile.py +++ b/pypy/module/posix/app_startfile.py @@ -7,7 +7,6 @@ ffi.cdef(""" HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT); HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT); - DWORD GetLastError(void); """) self.NULL = ffi.NULL self.cast = ffi.cast From noreply at buildbot.pypy.org Wed Nov 27 18:26:41 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 27 Nov 2013 18:26:41 +0100 (CET) Subject: [pypy-commit] pypy less-stringly-ops: close branch before merging Message-ID: 
<20131127172641.BCCA91C02AE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r68335:7e3a96135624 Date: 2013-11-27 17:25 +0000 http://bitbucket.org/pypy/pypy/changeset/7e3a96135624/ Log: close branch before merging From noreply at buildbot.pypy.org Wed Nov 27 18:26:43 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 27 Nov 2013 18:26:43 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge less-stringly-ops Message-ID: <20131127172643.C50D11C02AE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68336:3f3aa470a29f Date: 2013-11-27 17:25 +0000 http://bitbucket.org/pypy/pypy/changeset/3f3aa470a29f/ Log: hg merge less-stringly-ops diff too long, truncating to 2000 out of 3956 lines diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -584,10 +584,6 @@ def consider_op(self, block, opindex): op = block.operations[opindex] argcells = [self.binding(a) for a in op.args] - consider_meth = getattr(self,'consider_op_'+op.opname, - None) - if not consider_meth: - raise Exception,"unknown op: %r" % op # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the @@ -599,7 +595,7 @@ if isinstance(arg, annmodel.SomeImpossibleValue): raise BlockedInference(self, op, opindex) try: - resultcell = consider_meth(*argcells) + resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] e.source = gather_error(self, graph, block, opindex) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -1,69 +1,25 @@ """ Arguments objects. 
""" -from rpython.annotator.model import SomeTuple, SomeObject +from rpython.annotator.model import SomeTuple +from rpython.flowspace.argument import CallSpec -# for parsing call arguments -class RPythonCallsSpace(object): - """Pseudo Object Space providing almost no real operation. - For the Arguments class: if it really needs other operations, it means - that the call pattern is too complex for R-Python. - """ - def newtuple(self, items_s): - if len(items_s) == 1 and items_s[0] is Ellipsis: - res = SomeObject() # hack to get a SomeObject as the *arg - res.from_ellipsis = True - return res - else: - return SomeTuple(items_s) - - def unpackiterable(self, s_obj, expected_length=None): - if isinstance(s_obj, SomeTuple): - return list(s_obj.items) - if (s_obj.__class__ is SomeObject and - getattr(s_obj, 'from_ellipsis', False)): # see newtuple() - return [Ellipsis] - raise CallPatternTooComplex("'*' argument must be SomeTuple") - - def bool(self, s_tup): - assert isinstance(s_tup, SomeTuple) - return bool(s_tup.items) - - -class CallPatternTooComplex(Exception): - pass - - -class ArgumentsForTranslation(object): - w_starstararg = None - def __init__(self, space, args_w, keywords=None, keywords_w=None, - w_stararg=None, w_starstararg=None): - self.w_stararg = w_stararg - assert w_starstararg is None - self.space = space - assert isinstance(args_w, list) - self.arguments_w = args_w - self.keywords = keywords - self.keywords_w = keywords_w - self.keyword_names_w = None - - def __repr__(self): - """ NOT_RPYTHON """ - name = self.__class__.__name__ - if not self.keywords: - return '%s(%s)' % (name, self.arguments_w,) - else: - return '%s(%s, %s, %s)' % (name, self.arguments_w, - self.keywords, self.keywords_w) - +class ArgumentsForTranslation(CallSpec): @property def positional_args(self): if self.w_stararg is not None: - args_w = self.space.unpackiterable(self.w_stararg) + args_w = self.unpackiterable(self.w_stararg) return self.arguments_w + args_w else: return 
self.arguments_w + def newtuple(self, items_s): + return SomeTuple(items_s) + + def unpackiterable(self, s_obj): + assert isinstance(s_obj, SomeTuple) + return list(s_obj.items) + def fixedunpack(self, argcount): """The simplest argument parsing: get the 'argcount' arguments, or raise a real ValueError if the length is wrong.""" @@ -77,14 +33,12 @@ def prepend(self, w_firstarg): # used often "Return a new Arguments with a new argument inserted first." - return ArgumentsForTranslation(self.space, [w_firstarg] + self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, - self.w_starstararg) + return ArgumentsForTranslation([w_firstarg] + self.arguments_w, + self.keywords, self.w_stararg) def copy(self): - return ArgumentsForTranslation(self.space, self.arguments_w, - self.keywords, self.keywords_w, self.w_stararg, - self.w_starstararg) + return ArgumentsForTranslation(self.arguments_w, self.keywords, + self.w_stararg) def _match_signature(self, scope_w, signature, defaults_w=None): """Parse args and kwargs according to the signature of a code object, @@ -97,7 +51,7 @@ args_w = self.positional_args num_args = len(args_w) - keywords = self.keywords or [] + keywords = self.keywords num_kwds = len(keywords) # put as many positional input arguments into place as available @@ -111,7 +65,7 @@ starargs_w = args_w[co_argcount:] else: starargs_w = [] - scope_w[co_argcount] = self.space.newtuple(starargs_w) + scope_w[co_argcount] = self.newtuple(starargs_w) elif num_args > co_argcount: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) @@ -119,22 +73,17 @@ # handle keyword arguments num_remainingkwds = 0 - keywords_w = self.keywords_w kwds_mapping = None if num_kwds: # kwds_mapping maps target indexes in the scope (minus input_argcount) - # to positions in the keywords_w list - kwds_mapping = [-1] * (co_argcount - input_argcount) + # to keyword names + kwds_mapping = [] # match the keywords given at the call site to the argument names # the called 
function takes # this function must not take a scope_w, to make the scope not # escape num_remainingkwds = len(keywords) - for i, name in enumerate(keywords): - # If name was not encoded as a string, it could be None. In that - # case, it's definitely not going to be in the signature. - if name is None: - continue + for name in keywords: j = signature.find_argname(name) # if j == -1 nothing happens if j < input_argcount: @@ -142,14 +91,14 @@ if j >= 0: raise ArgErrMultipleValues(name) else: - kwds_mapping[j - input_argcount] = i # map to the right index + kwds_mapping.append(name) num_remainingkwds -= 1 if num_remainingkwds: if co_argcount == 0: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - raise ArgErrUnknownKwds(self.space, num_remainingkwds, keywords, - kwds_mapping, self.keyword_names_w) + raise ArgErrUnknownKwds(num_remainingkwds, keywords, + kwds_mapping) # check for missing arguments and fill them from the kwds, # or with defaults, if available @@ -157,14 +106,11 @@ if input_argcount < co_argcount: def_first = co_argcount - (0 if defaults_w is None else len(defaults_w)) j = 0 - kwds_index = -1 for i in range(input_argcount, co_argcount): - if kwds_mapping is not None: - kwds_index = kwds_mapping[j] - j += 1 - if kwds_index >= 0: - scope_w[i] = keywords_w[kwds_index] - continue + name = signature.argnames[i] + if name in keywords: + scope_w[i] = keywords[name] + continue defnum = i - def_first if defnum >= 0: scope_w[i] = defaults_w[defnum] @@ -175,8 +121,7 @@ def unpack(self): "Return a ([w1,w2...], {'kw':w3...}) pair." 
- kwds_w = dict(zip(self.keywords, self.keywords_w)) if self.keywords else {} - return self.positional_args, kwds_w + return self.positional_args, self.keywords def match_signature(self, signature, defaults_w): """Parse args and kwargs according to the signature of a code object, @@ -189,41 +134,29 @@ def unmatch_signature(self, signature, data_w): """kind of inverse of match_signature""" - need_cnt = len(self.positional_args) - need_kwds = self.keywords or [] - space = self.space argnames, varargname, kwargname = signature assert kwargname is None cnt = len(argnames) - data_args_w = data_w[:cnt] + need_cnt = len(self.positional_args) if varargname: - data_w_stararg = data_w[cnt] - cnt += 1 - else: - data_w_stararg = space.newtuple([]) + assert len(data_w) == cnt + 1 + stararg_w = self.unpackiterable(data_w[cnt]) + if stararg_w: + args_w = data_w[:cnt] + stararg_w + assert len(args_w) == need_cnt + assert not self.keywords + return ArgumentsForTranslation(args_w, {}) + else: + data_w = data_w[:-1] assert len(data_w) == cnt + assert len(data_w) >= need_cnt + args_w = data_w[:need_cnt] + _kwds_w = dict(zip(argnames[need_cnt:], data_w[need_cnt:])) + keywords_w = [_kwds_w[key] for key in self.keywords] + return ArgumentsForTranslation(args_w, dict(zip(self.keywords, keywords_w))) - unfiltered_kwds_w = {} - if len(data_args_w) >= need_cnt: - args_w = data_args_w[:need_cnt] - for argname, w_arg in zip(argnames[need_cnt:], data_args_w[need_cnt:]): - unfiltered_kwds_w[argname] = w_arg - assert not space.bool(data_w_stararg) - else: - stararg_w = space.unpackiterable(data_w_stararg) - args_w = data_args_w + stararg_w - assert len(args_w) == need_cnt - - keywords = [] - keywords_w = [] - for key in need_kwds: - keywords.append(key) - keywords_w.append(unfiltered_kwds_w[key]) - - return ArgumentsForTranslation(self.space, args_w, keywords, keywords_w) - - @staticmethod - def fromshape(space, (shape_cnt, shape_keys, shape_star, shape_stst), data_w): + @classmethod + def 
fromshape(cls, (shape_cnt, shape_keys, shape_star), data_w): args_w = data_w[:shape_cnt] p = end_keys = shape_cnt + len(shape_keys) if shape_star: @@ -231,40 +164,12 @@ p += 1 else: w_star = None - if shape_stst: - w_starstar = data_w[p] - p += 1 - else: - w_starstar = None - return ArgumentsForTranslation(space, args_w, list(shape_keys), - data_w[shape_cnt:end_keys], w_star, - w_starstar) + return cls(args_w, dict(zip(shape_keys, data_w[shape_cnt:end_keys])), + w_star) - def flatten(self): - """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape() - data_w = self.arguments_w + [self.keywords_w[self.keywords.index(key)] - for key in shape_keys] - if shape_star: - data_w.append(self.w_stararg) - if shape_stst: - data_w.append(self.w_starstararg) - return (shape_cnt, shape_keys, shape_star, shape_stst), data_w - def _rawshape(self, nextra=0): - shape_cnt = len(self.arguments_w) + nextra # Number of positional args - if self.keywords: - shape_keys = self.keywords[:] # List of keywords (strings) - shape_keys.sort() - else: - shape_keys = [] - shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = self.w_starstararg is not None # Flag: presence of **kwds - return shape_cnt, tuple(shape_keys), shape_star, shape_stst # shape_keys are sorted - - -def rawshape(args, nextra=0): - return args._rawshape(nextra) +def rawshape(args): + return args._rawshape() # @@ -336,31 +241,12 @@ class ArgErrUnknownKwds(ArgErr): - def __init__(self, space, num_remainingkwds, keywords, kwds_mapping, - keyword_names_w): + def __init__(self, num_remainingkwds, keywords, kwds_mapping): name = '' self.num_kwds = num_remainingkwds if num_remainingkwds == 1: - for i in range(len(keywords)): - if i not in kwds_mapping: - name = keywords[i] - if name is None: - # We'll assume it's unicode. Encode it. 
- # Careful, I *think* it should not be possible to - # get an IndexError here but you never know. - try: - if keyword_names_w is None: - raise IndexError - # note: negative-based indexing from the end - w_name = keyword_names_w[i - len(keywords)] - except IndexError: - name = '?' - else: - w_enc = space.wrap(space.sys.defaultencoding) - w_err = space.wrap("replace") - w_name = space.call_method(w_name, "encode", w_enc, - w_err) - name = space.str_w(w_name) + for name in keywords: + if name not in kwds_mapping: break self.kwd_name = name diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -5,19 +5,15 @@ import py import operator from rpython.tool.pairtype import pair, pairtype -from rpython.annotator.model import SomeObject, SomeInteger, SomeBool, s_Bool -from rpython.annotator.model import SomeString, SomeChar, SomeList, SomeDict,\ - SomeOrderedDict -from rpython.annotator.model import SomeUnicodeCodePoint, SomeUnicodeString -from rpython.annotator.model import SomeTuple, SomeImpossibleValue, s_ImpossibleValue -from rpython.annotator.model import SomeInstance, SomeBuiltin, SomeIterator -from rpython.annotator.model import SomePBC, SomeFloat, s_None, SomeByteArray -from rpython.annotator.model import SomeWeakRef -from rpython.annotator.model import SomeAddress, SomeTypedAddressAccess -from rpython.annotator.model import SomeSingleFloat, SomeLongFloat, SomeType -from rpython.annotator.model import unionof, UnionError, missing_operation -from rpython.annotator.model import read_can_only_throw -from rpython.annotator.model import add_knowntypedata, merge_knowntypedata +from rpython.annotator.model import ( + SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, + SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, + SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, + SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, 
SomeByteArray, + SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, + missing_operation, read_can_only_throw, add_knowntypedata, + merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.rlib import rarithmetic @@ -197,7 +193,9 @@ getitem_key = getitem_idx_key -class __extend__(pairtype(SomeType, SomeType)): +class __extend__(pairtype(SomeType, SomeType), + pairtype(SomeType, SomeConstantType), + pairtype(SomeConstantType, SomeType),): def union((obj1, obj2)): result = SomeType() diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -12,13 +12,13 @@ SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray) + SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import ArgumentsForTranslation, RPythonCallsSpace +from rpython.annotator.argument import ArgumentsForTranslation from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype, llmemory @@ -436,11 +436,7 @@ elif isinstance(x, llmemory.fakeaddress): result = SomeAddress() elif tp is type: - if (x is type(None) or # add cases here if needed - x.__module__ == 'rpython.rtyper.lltypesystem.lltype'): - result = SomeType() - else: - 
result = SomePBC([self.getdesc(x)]) + result = SomeConstantType(x, self) elif callable(x): if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a @@ -700,12 +696,11 @@ return op def build_args(self, op, args_s): - space = RPythonCallsSpace() if op == "simple_call": - return ArgumentsForTranslation(space, list(args_s)) + return ArgumentsForTranslation(list(args_s)) elif op == "call_args": return ArgumentsForTranslation.fromshape( - space, args_s[0].const, # shape + args_s[0].const, # shape list(args_s[1:])) def ondegenerated(self, what, s_value, where=None, called_from_graph=None): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -312,7 +312,7 @@ r_func, nimplicitarg = s_repr.const.get_r_implfunc() nbargs = len(args_s) + nimplicitarg - s_sigs = r_func.get_s_signatures((nbargs, (), False, False)) + s_sigs = r_func.get_s_signatures((nbargs, (), False)) if len(s_sigs) != 1: raise TyperError("cannot hlinvoke callable %r with not uniform" "annotations: %r" % (s_repr.const, diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -879,11 +879,12 @@ self.name, flags) + @staticmethod def consider_call_site(bookkeeper, family, descs, args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): # we are computing call families and call tables that always contain @@ -1039,11 +1040,12 @@ args = args.prepend(s_self) return self.funcdesc.pycall(schedule, args, s_previous_result, op) + @staticmethod def consider_call_site(bookkeeper, family, descs, 
args, s_result, op): - shape = rawshape(args, nextra=1) # account for the extra 'self' + cnt, keys, star = rawshape(args) + shape = cnt + 1, keys, star # account for the extra 'self' row = FunctionDesc.row_to_consider(descs, args, op) family.calltable_add_row(shape, row) - consider_call_site = staticmethod(consider_call_site) def rowkey(self): return self.funcdesc diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -502,6 +502,14 @@ else: return kt.__name__ +class SomeConstantType(SomePBC): + can_be_None = False + subset_of = None + def __init__(self, x, bk): + self.descriptions = set([bk.getdesc(x)]) + self.knowntype = type(x) + self.const = x + class SomeBuiltin(SomeObject): "Stands for a built-in function or method with special-cased analysis." diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -1,31 +1,19 @@ # base annotation policy for specialization from rpython.annotator.specialize import default_specialize as default -from rpython.annotator.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype, specialize_arg_or_var -from rpython.annotator.specialize import memo, specialize_call_location +from rpython.annotator.specialize import ( + specialize_argvalue, specialize_argtype, specialize_arglistitemtype, + specialize_arg_or_var, memo, specialize_call_location) -class BasicAnnotatorPolicy(object): +class AnnotatorPolicy(object): + """ + Possibly subclass and pass an instance to the annotator to control + special-casing during annotation + """ def event(pol, bookkeeper, what, *args): pass - def get_specializer(pol, tag): - return pol.no_specialization - - def no_specialization(pol, funcdesc, args_s): - return funcdesc.cachedgraph(None) - - def no_more_blocks_to_annotate(pol, annotator): - # hint to all pending specializers that we are done - for callback in 
annotator.bookkeeper.pending_specializations: - callback() - del annotator.bookkeeper.pending_specializations[:] - -class AnnotatorPolicy(BasicAnnotatorPolicy): - """ - Possibly subclass and pass an instance to the annotator to control special casing during annotation - """ - def get_specializer(pol, directive): if directive is None: return pol.default_specialize @@ -74,3 +62,9 @@ def specialize__ll_and_arg(pol, *args): from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy return LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args) + + def no_more_blocks_to_annotate(pol, annotator): + # hint to all pending specializers that we are done + for callback in annotator.bookkeeper.pending_specializations: + callback() + del annotator.bookkeeper.pending_specializations[:] diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -5,6 +5,7 @@ from rpython.tool.algo.unionfind import UnionFind from rpython.flowspace.model import Block, Link, Variable, SpaceOperation from rpython.flowspace.model import checkgraph +from rpython.flowspace.operation import op from rpython.annotator import model as annmodel from rpython.flowspace.argument import Signature @@ -33,7 +34,8 @@ argscopy = [Variable(v) for v in graph.getargs()] starargs = [Variable('stararg%d'%i) for i in range(nb_extra_args)] newstartblock = Block(argscopy[:-1] + starargs) - newtup = SpaceOperation('newtuple', starargs, argscopy[-1]) + newtup = op.newtuple(*starargs) + newtup.result = argscopy[-1] newstartblock.operations.append(newtup) newstartblock.closeblock(Link(argscopy, graph.startblock)) graph.startblock = newstartblock diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -15,6 +15,7 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import 
objectmodel from rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.operation import op from rpython.translator.test import snippet @@ -69,12 +70,11 @@ return x+1 """ x = Variable("x") - result = Variable("result") - op = SpaceOperation("add", [x, Constant(1)], result) + oper = op.add(x, Constant(1)) block = Block([x]) fun = FunctionGraph("f", block) - block.operations.append(op) - block.closeblock(Link([result], fun.returnblock)) + block.operations.append(oper) + block.closeblock(Link([oper.result], fun.returnblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) a.complete() @@ -90,20 +90,18 @@ """ i1 = Variable("i1") i2 = Variable("i2") - i3 = Variable("i3") - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) - decop = SpaceOperation("add", [i2, Constant(-1)], i3) + conditionop = op.gt(i1, Constant(0)) + decop = op.add(i2, Constant(-1)) headerblock = Block([i1]) whileblock = Block([i2]) fun = FunctionGraph("f", headerblock) headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres + headerblock.exitswitch = conditionop.result headerblock.closeblock(Link([i1], fun.returnblock, False), Link([i1], whileblock, True)) whileblock.operations.append(decop) - whileblock.closeblock(Link([i3], headerblock)) + whileblock.closeblock(Link([decop.result], headerblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) @@ -123,15 +121,12 @@ i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") - i4 = Variable("i4") sum2 = Variable("sum2") sum3 = Variable("sum3") - sum4 = Variable("sum4") - - conditionres = Variable("conditionres") - conditionop = SpaceOperation("gt", [i2, Constant(0)], conditionres) - decop = SpaceOperation("add", [i3, Constant(-1)], i4) - addop = SpaceOperation("add", [i3, sum3], sum4) + + conditionop = op.gt(i2, Constant(0)) + decop = op.add(i3, 
Constant(-1)) + addop = op.add(i3, sum3) startblock = Block([i1]) headerblock = Block([i2, sum2]) whileblock = Block([i3, sum3]) @@ -139,12 +134,12 @@ fun = FunctionGraph("f", startblock) startblock.closeblock(Link([i1, Constant(0)], headerblock)) headerblock.operations.append(conditionop) - headerblock.exitswitch = conditionres + headerblock.exitswitch = conditionop.result headerblock.closeblock(Link([sum2], fun.returnblock, False), Link([i2, sum2], whileblock, True)) whileblock.operations.append(addop) whileblock.operations.append(decop) - whileblock.closeblock(Link([i4, sum4], headerblock)) + whileblock.closeblock(Link([decop.result, addop.result], headerblock)) a = self.RPythonAnnotator() a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()]) @@ -1065,8 +1060,9 @@ gf2 = graphof(a, f2) gf3 = graphof(a, f3) - assert fam1.calltables == {(2, (), False, False): [{fdesc1: gf1}], (1, (), False, False): [{fdesc1: gf1}]} - assert fam2.calltables == {(1, (), False, False): [{fdesc2: gf2, fdesc3: gf3}]} + assert fam1.calltables == {(2, (), False): [{fdesc1: gf1}], + (1, (), False): [{fdesc1: gf1}]} + assert fam2.calltables == {(1, (), False): [{fdesc2: gf2, fdesc3: gf3}]} def test_pbc_call_ins(self): class A(object): @@ -1117,14 +1113,14 @@ gfA_m = graphof(a, A.m.im_func) gfC_m = graphof(a, C.m.im_func) - assert famB_n.calltables == {(1, (), False, False): [{mdescB_n.funcdesc: gfB_n}] } - assert famA_m.calltables == {(1, (), False, False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } + assert famB_n.calltables == {(1, (), False): [{mdescB_n.funcdesc: gfB_n}] } + assert famA_m.calltables == {(1, (), False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } mdescCinit = getmdesc(C().__init__) famCinit = mdescCinit.getcallfamily() gfCinit = graphof(a, C.__init__.im_func) - assert famCinit.calltables == {(1, (), False, False): [{mdescCinit.funcdesc: gfCinit}] } + assert famCinit.calltables == {(1, (), False): [{mdescCinit.funcdesc: gfCinit}] } 
def test_isinstance_usigned(self): def f(x): @@ -2053,7 +2049,7 @@ someint = annmodel.SomeInteger() - assert (fdesc.get_s_signatures((2,(),False,False)) + assert (fdesc.get_s_signatures((2, (), False)) == [([someint,someint],someint)]) def test_emulated_pbc_call_callback(self): diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -1,43 +1,30 @@ # -*- coding: utf-8 -*- import py from rpython.annotator.argument import ArgumentsForTranslation, rawshape -from rpython.flowspace.argument import Signature +from rpython.flowspace.argument import Signature, CallSpec -class DummySpace(object): +class MockArgs(ArgumentsForTranslation): def newtuple(self, items): return tuple(items) - def bool(self, obj): - return bool(obj) - def unpackiterable(self, it): return list(it) -def make_arguments_for_translation(space, args_w, keywords_w={}, - w_stararg=None, w_starstararg=None): - return ArgumentsForTranslation(space, args_w, keywords_w.keys(), - keywords_w.values(), w_stararg, - w_starstararg) - class TestArgumentsForTranslation(object): def test_prepend(self): - space = DummySpace() - args = ArgumentsForTranslation(space, ["0"]) + args = MockArgs(["0"]) args1 = args.prepend("thingy") assert args1 is not args assert args1.arguments_w == ["thingy", "0"] - assert args1.keywords is args.keywords - assert args1.keywords_w is args.keywords_w + assert args1.keywords == args.keywords def test_fixedunpacked(self): - space = DummySpace() - - args = ArgumentsForTranslation(space, [], ["k"], [1]) + args = MockArgs([], {"k": 1}) py.test.raises(ValueError, args.fixedunpack, 1) - args = ArgumentsForTranslation(space, ["a", "b"]) + args = MockArgs(["a", "b"]) py.test.raises(ValueError, args.fixedunpack, 0) py.test.raises(ValueError, args.fixedunpack, 1) py.test.raises(ValueError, args.fixedunpack, 3) @@ -46,122 +33,89 @@ assert args.fixedunpack(2) == ['a', 'b'] 
def test_unmatch_signature(self): - space = DummySpace() - args = make_arguments_for_translation(space, [1,2,3]) + args = MockArgs([1, 2, 3]) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1]) + args = MockArgs([1]) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, [2, 3]) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1,2,3,4,5]) + args = MockArgs([1, 2, 3, 4, 5]) sig = Signature(['a', 'b', 'c'], 'r', None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 3, 'b': 2}) + args = MockArgs([1], {'c': 3, 'b': 2}) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, []) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 5}) + args = MockArgs([1], {'c': 5}) sig = Signature(['a', 'b', 'c'], None, None) data = args.match_signature(sig, [2, 3]) new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() def test_rawshape(self): - space = DummySpace() - args = make_arguments_for_translation(space, [1,2,3]) - assert rawshape(args) == (3, (), False, False) + args = MockArgs([1, 2, 3]) + assert rawshape(args) == (3, (), False) - args = make_arguments_for_translation(space, [1]) - assert rawshape(args, 2) == (3, (), False, False) + args = MockArgs([1, 2, 3, 4, 5]) + assert rawshape(args) == (5, (), False) - args = make_arguments_for_translation(space, [1,2,3,4,5]) - assert rawshape(args) == (5, (), False, False) + args = MockArgs([1], {'c': 3, 'b': 2}) + assert rawshape(args) == (1, ('b', 'c'), False) - args = 
make_arguments_for_translation(space, [1], {'c': 3, 'b': 2}) - assert rawshape(args) == (1, ('b', 'c'), False, False) + args = MockArgs([1], {'c': 5}) + assert rawshape(args) == (1, ('c', ), False) - args = make_arguments_for_translation(space, [1], {'c': 5}) - assert rawshape(args) == (1, ('c', ), False, False) + args = MockArgs([1], {'c': 5, 'd': 7}) + assert rawshape(args) == (1, ('c', 'd'), False) - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - assert rawshape(args) == (1, ('c', 'd'), False, False) - - args = make_arguments_for_translation(space, [1,2,3,4,5], {'e': 5, 'd': 7}) - assert rawshape(args) == (5, ('d', 'e'), False, False) - - - def test_flatten(self): - space = DummySpace() - args = make_arguments_for_translation(space, [1,2,3]) - assert args.flatten() == ((3, (), False, False), [1, 2, 3]) - - args = make_arguments_for_translation(space, [1]) - assert args.flatten() == ((1, (), False, False), [1]) - - args = make_arguments_for_translation(space, [1,2,3,4,5]) - assert args.flatten() == ((5, (), False, False), [1,2,3,4,5]) - - args = make_arguments_for_translation(space, [1], {'c': 3, 'b': 2}) - assert args.flatten() == ((1, ('b', 'c'), False, False), [1, 2, 3]) - - args = make_arguments_for_translation(space, [1], {'c': 5}) - assert args.flatten() == ((1, ('c', ), False, False), [1, 5]) - - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - assert args.flatten() == ((1, ('c', 'd'), False, False), [1, 5, 7]) - - args = make_arguments_for_translation(space, [1,2,3,4,5], {'e': 5, 'd': 7}) - assert args.flatten() == ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) + args = MockArgs([1, 2, 3, 4, 5], {'e': 5, 'd': 7}) + assert rawshape(args) == (5, ('d', 'e'), False) def test_stararg_flowspace_variable(self): - space = DummySpace() var = object() - shape = ((2, ('g', ), True, False), [1, 2, 9, var]) - args = make_arguments_for_translation(space, [1,2], {'g': 9}, - w_stararg=var) + shape = ((2, ('g', ), True), 
[1, 2, 9, var]) + args = MockArgs([1, 2], {'g': 9}, w_stararg=var) assert args.flatten() == shape - args = ArgumentsForTranslation.fromshape(space, *shape) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - def test_fromshape(self): - space = DummySpace() - shape = ((3, (), False, False), [1, 2, 3]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((3, (), False), [1, 2, 3]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, (), False, False), [1]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, (), False), [1]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((5, (), False, False), [1,2,3,4,5]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((5, (), False), [1, 2, 3, 4, 5]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('b', 'c'), False, False), [1, 2, 3]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, ('b', 'c'), False), [1, 2, 3]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('c', ), False, False), [1, 5]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, ('c', ), False), [1, 5]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((1, ('c', 'd'), False, False), [1, 5, 7]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((1, ('c', 'd'), False), [1, 5, 7]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape - shape = ((5, ('d', 'e'), False, False), [1, 2, 3, 4, 5, 7, 5]) - args = ArgumentsForTranslation.fromshape(space, *shape) + shape = ((5, ('d', 'e'), False), [1, 2, 3, 4, 5, 7, 5]) + args = MockArgs.fromshape(*shape) assert args.flatten() == shape diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -35,33 +35,33 @@ class __extend__(SomeObject): - 
def type(obj, *moreargs): + def type(self, *moreargs): if moreargs: raise Exception('type() called with more than one argument') r = SomeType() bk = getbookkeeper() - op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=obj) + op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=self) r.is_type_of = [op.args[0]] return r - def issubtype(obj, s_cls): - if hasattr(obj, 'is_type_of'): - vars = obj.is_type_of + def issubtype(self, s_cls): + if hasattr(self, 'is_type_of'): + vars = self.is_type_of annotator = getbookkeeper().annotator return builtin.builtin_isinstance(annotator.binding(vars[0]), s_cls, vars) - if obj.is_constant() and s_cls.is_constant(): - return immutablevalue(issubclass(obj.const, s_cls.const)) + if self.is_constant() and s_cls.is_constant(): + return immutablevalue(issubclass(self.const, s_cls.const)) return s_Bool - def len(obj): + def len(self): return SomeInteger(nonneg=True) - def bool_behavior(obj, s): - if obj.is_immutable_constant(): - s.const = bool(obj.const) + def bool_behavior(self, s): + if self.is_immutable_constant(): + s.const = bool(self.const) else: - s_len = obj.len() + s_len = self.len() if s_len.is_immutable_constant(): s.const = s_len.const > 0 @@ -80,83 +80,83 @@ r.set_knowntypedata(knowntypedata) return r - def hash(obj): + def hash(self): raise AnnotatorError("cannot use hash() in RPython") - def str(obj): - getbookkeeper().count('str', obj) + def str(self): + getbookkeeper().count('str', self) return SomeString() - def unicode(obj): - getbookkeeper().count('unicode', obj) + def unicode(self): + getbookkeeper().count('unicode', self) return SomeUnicodeString() - def repr(obj): - getbookkeeper().count('repr', obj) + def repr(self): + getbookkeeper().count('repr', self) return SomeString() - def hex(obj): - getbookkeeper().count('hex', obj) + def hex(self): + getbookkeeper().count('hex', self) return SomeString() - def oct(obj): - getbookkeeper().count('oct', obj) + def oct(self): + 
getbookkeeper().count('oct', self) return SomeString() - def id(obj): + def id(self): raise Exception("cannot use id() in RPython; " "see objectmodel.compute_xxx()") - def int(obj): + def int(self): return SomeInteger() - def float(obj): + def float(self): return SomeFloat() - def delattr(obj, s_attr): - if obj.__class__ != SomeObject or obj.knowntype != object: + def delattr(self, s_attr): + if self.__class__ != SomeObject or self.knowntype != object: getbookkeeper().warning( ("delattr on potentally non-SomeObjects is not RPythonic: delattr(%r,%r)" % - (obj, s_attr))) + (self, s_attr))) - def find_method(obj, name): + def find_method(self, name): "Look for a special-case implementation for the named method." try: - analyser = getattr(obj.__class__, 'method_' + name) + analyser = getattr(self.__class__, 'method_' + name) except AttributeError: return None else: - return SomeBuiltin(analyser, obj, name) + return SomeBuiltin(analyser, self, name) - def getattr(obj, s_attr): + def getattr(self, s_attr): # get a SomeBuiltin if the SomeObject has # a corresponding method to handle it if not s_attr.is_constant() or not isinstance(s_attr.const, str): raise AnnotatorError("getattr(%r, %r) has non-constant argument" - % (obj, s_attr)) + % (self, s_attr)) attr = s_attr.const - s_method = obj.find_method(attr) + s_method = self.find_method(attr) if s_method is not None: return s_method # if the SomeObject is itself a constant, allow reading its attrs - if obj.is_immutable_constant() and hasattr(obj.const, attr): - return immutablevalue(getattr(obj.const, attr)) - raise AnnotatorError("Cannot find attribute %r on %r" % (attr, obj)) + if self.is_immutable_constant() and hasattr(self.const, attr): + return immutablevalue(getattr(self.const, attr)) + raise AnnotatorError("Cannot find attribute %r on %r" % (attr, self)) getattr.can_only_throw = [] - def bind_callables_under(obj, classdef, name): - return obj # default unbound __get__ implementation + def bind_callables_under(self, 
classdef, name): + return self # default unbound __get__ implementation - def simple_call(obj, *args_s): - return obj.call(getbookkeeper().build_args("simple_call", args_s)) + def simple_call(self, *args_s): + return self.call(getbookkeeper().build_args("simple_call", args_s)) - def call_args(obj, *args_s): - return obj.call(getbookkeeper().build_args("call_args", args_s)) + def call_args(self, *args_s): + return self.call(getbookkeeper().build_args("call_args", args_s)) - def call(obj, args, implicit_init=False): + def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") - def op_contains(obj, s_element): + def op_contains(self, s_element): return s_Bool op_contains.can_only_throw = [] @@ -165,10 +165,10 @@ class __extend__(SomeFloat): - def pos(flt): - return flt + def pos(self): + return self - def neg(flt): + def neg(self): return SomeFloat() abs = neg @@ -233,105 +233,105 @@ class __extend__(SomeTuple): - def len(tup): - return immutablevalue(len(tup.items)) + def len(self): + return immutablevalue(len(self.items)) - def iter(tup): - getbookkeeper().count("tuple_iter", tup) - return SomeIterator(tup) + def iter(self): + getbookkeeper().count("tuple_iter", self) + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(tup): - return unionof(*tup.items) + def getanyitem(self): + return unionof(*self.items) - def getslice(tup, s_start, s_stop): + def getslice(self, s_start, s_stop): assert s_start.is_immutable_constant(),"tuple slicing: needs constants" assert s_stop.is_immutable_constant(), "tuple slicing: needs constants" - items = tup.items[s_start.const:s_stop.const] + items = self.items[s_start.const:s_stop.const] return SomeTuple(items) class __extend__(SomeList): - def method_append(lst, s_value): - lst.listdef.resize() - lst.listdef.generalize(s_value) + def method_append(self, s_value): + self.listdef.resize() + self.listdef.generalize(s_value) - def method_extend(lst, s_iterable): - 
lst.listdef.resize() + def method_extend(self, s_iterable): + self.listdef.resize() if isinstance(s_iterable, SomeList): # unify the two lists - lst.listdef.agree(s_iterable.listdef) + self.listdef.agree(s_iterable.listdef) else: s_iter = s_iterable.iter() - lst.method_append(s_iter.next()) + self.method_append(s_iter.next()) - def method_reverse(lst): - lst.listdef.mutate() + def method_reverse(self): + self.listdef.mutate() - def method_insert(lst, s_index, s_value): - lst.method_append(s_value) + def method_insert(self, s_index, s_value): + self.method_append(s_value) - def method_remove(lst, s_value): - lst.listdef.resize() - lst.listdef.generalize(s_value) + def method_remove(self, s_value): + self.listdef.resize() + self.listdef.generalize(s_value) - def method_pop(lst, s_index=None): - lst.listdef.resize() - return lst.listdef.read_item() + def method_pop(self, s_index=None): + self.listdef.resize() + return self.listdef.read_item() method_pop.can_only_throw = [IndexError] - def method_index(lst, s_value): + def method_index(self, s_value): getbookkeeper().count("list_index") - lst.listdef.generalize(s_value) + self.listdef.generalize(s_value) return SomeInteger(nonneg=True) - def len(lst): - s_item = lst.listdef.read_item() + def len(self): + s_item = self.listdef.read_item() if isinstance(s_item, SomeImpossibleValue): return immutablevalue(0) - return SomeObject.len(lst) + return SomeObject.len(self) - def iter(lst): - return SomeIterator(lst) + def iter(self): + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(lst): - return lst.listdef.read_item() + def getanyitem(self): + return self.listdef.read_item() - def op_contains(lst, s_element): - lst.listdef.generalize(s_element) + def op_contains(self, s_element): + self.listdef.generalize(s_element) return s_Bool op_contains.can_only_throw = [] - def hint(lst, *args_s): + def hint(self, *args_s): hints = args_s[-1].const if 'maxlength' in hints: # only for iteration over lists or dicts at 
the moment, # not over an iterator object (because it has no known length) s_iterable = args_s[0] if isinstance(s_iterable, (SomeList, SomeDict)): - lst = SomeList(lst.listdef) # create a fresh copy - lst.listdef.resize() - lst.listdef.listitem.hint_maxlength = True + self = SomeList(self.listdef) # create a fresh copy + self.listdef.resize() + self.listdef.listitem.hint_maxlength = True elif 'fence' in hints: - lst = lst.listdef.offspring() - return lst + self = self.listdef.offspring() + return self - def getslice(lst, s_start, s_stop): + def getslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - return lst.listdef.offspring() + return self.listdef.offspring() - def setslice(lst, s_start, s_stop, s_iterable): + def setslice(self, s_start, s_stop, s_iterable): check_negative_slice(s_start, s_stop) if not isinstance(s_iterable, SomeList): raise Exception("list[start:stop] = x: x must be a list") - lst.listdef.mutate() - lst.listdef.agree(s_iterable.listdef) + self.listdef.mutate() + self.listdef.agree(s_iterable.listdef) # note that setslice is not allowed to resize a list in RPython - def delslice(lst, s_start, s_stop): + def delslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - lst.listdef.resize() + self.listdef.resize() def check_negative_slice(s_start, s_stop, error="slicing"): if isinstance(s_start, SomeInteger) and not s_start.nonneg: @@ -344,29 +344,29 @@ class __extend__(SomeDict): - def _is_empty(dct): - s_key = dct.dictdef.read_key() - s_value = dct.dictdef.read_value() + def _is_empty(self): + s_key = self.dictdef.read_key() + s_value = self.dictdef.read_value() return (isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue)) - def len(dct): - if dct._is_empty(): + def len(self): + if self._is_empty(): return immutablevalue(0) - return SomeObject.len(dct) + return SomeObject.len(self) - def iter(dct): - return SomeIterator(dct) + def iter(self): + return SomeIterator(self) 
iter.can_only_throw = [] - def getanyitem(dct, variant='keys'): + def getanyitem(self, variant='keys'): if variant == 'keys': - return dct.dictdef.read_key() + return self.dictdef.read_key() elif variant == 'values': - return dct.dictdef.read_value() + return self.dictdef.read_value() elif variant == 'items': - s_key = dct.dictdef.read_key() - s_value = dct.dictdef.read_value() + s_key = self.dictdef.read_key() + s_value = self.dictdef.read_value() if (isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue)): return s_ImpossibleValue @@ -375,59 +375,59 @@ else: raise ValueError - def method_get(dct, key, dfl): - dct.dictdef.generalize_key(key) - dct.dictdef.generalize_value(dfl) - return dct.dictdef.read_value() + def method_get(self, key, dfl): + self.dictdef.generalize_key(key) + self.dictdef.generalize_value(dfl) + return self.dictdef.read_value() method_setdefault = method_get - def method_copy(dct): - return SomeDict(dct.dictdef) + def method_copy(self): + return SomeDict(self.dictdef) def method_update(dct1, dct2): if s_None.contains(dct2): return SomeImpossibleValue() dct1.dictdef.union(dct2.dictdef) - def method_keys(dct): - return getbookkeeper().newlist(dct.dictdef.read_key()) + def method_keys(self): + return getbookkeeper().newlist(self.dictdef.read_key()) - def method_values(dct): - return getbookkeeper().newlist(dct.dictdef.read_value()) + def method_values(self): + return getbookkeeper().newlist(self.dictdef.read_value()) - def method_items(dct): - return getbookkeeper().newlist(dct.getanyitem('items')) + def method_items(self): + return getbookkeeper().newlist(self.getanyitem('items')) - def method_iterkeys(dct): - return SomeIterator(dct, 'keys') + def method_iterkeys(self): + return SomeIterator(self, 'keys') - def method_itervalues(dct): - return SomeIterator(dct, 'values') + def method_itervalues(self): + return SomeIterator(self, 'values') - def method_iteritems(dct): - return SomeIterator(dct, 'items') + def 
method_iteritems(self): + return SomeIterator(self, 'items') - def method_clear(dct): + def method_clear(self): pass - def method_popitem(dct): - return dct.getanyitem('items') + def method_popitem(self): + return self.getanyitem('items') - def method_pop(dct, s_key, s_dfl=None): - dct.dictdef.generalize_key(s_key) + def method_pop(self, s_key, s_dfl=None): + self.dictdef.generalize_key(s_key) if s_dfl is not None: - dct.dictdef.generalize_value(s_dfl) - return dct.dictdef.read_value() + self.dictdef.generalize_value(s_dfl) + return self.dictdef.read_value() - def _can_only_throw(dic, *ignore): - if dic.dictdef.dictkey.custom_eq_hash: + def _can_only_throw(self, *ignore): + if self.dictdef.dictkey.custom_eq_hash: return None # r_dict: can throw anything return [] # else: no possible exception - def op_contains(dct, s_element): - dct.dictdef.generalize_key(s_element) - if dct._is_empty(): + def op_contains(self, s_element): + self.dictdef.generalize_key(s_element) + if self._is_empty(): s_bool = SomeBool() s_bool.const = False return s_bool @@ -438,89 +438,89 @@ class __extend__(SomeString, SomeUnicodeString): - def method_startswith(str, frag): - if str.is_constant() and frag.is_constant(): - return immutablevalue(str.const.startswith(frag.const)) + def method_startswith(self, frag): + if self.is_constant() and frag.is_constant(): + return immutablevalue(self.const.startswith(frag.const)) return s_Bool - def method_endswith(str, frag): - if str.is_constant() and frag.is_constant(): - return immutablevalue(str.const.endswith(frag.const)) + def method_endswith(self, frag): + if self.is_constant() and frag.is_constant(): + return immutablevalue(self.const.endswith(frag.const)) return s_Bool - def method_find(str, frag, start=None, end=None): + def method_find(self, frag, start=None, end=None): check_negative_slice(start, end, "find") return SomeInteger() - def method_rfind(str, frag, start=None, end=None): + def method_rfind(self, frag, start=None, end=None): 
check_negative_slice(start, end, "rfind") return SomeInteger() - def method_count(str, frag, start=None, end=None): + def method_count(self, frag, start=None, end=None): check_negative_slice(start, end, "count") return SomeInteger(nonneg=True) - def method_strip(str, chr=None): - return str.basestringclass(no_nul=str.no_nul) + def method_strip(self, chr=None): + return self.basestringclass(no_nul=self.no_nul) - def method_lstrip(str, chr=None): - return str.basestringclass(no_nul=str.no_nul) + def method_lstrip(self, chr=None): + return self.basestringclass(no_nul=self.no_nul) - def method_rstrip(str, chr=None): - return str.basestringclass(no_nul=str.no_nul) + def method_rstrip(self, chr=None): + return self.basestringclass(no_nul=self.no_nul) - def method_join(str, s_list): + def method_join(self, s_list): if s_None.contains(s_list): return SomeImpossibleValue() - getbookkeeper().count("str_join", str) + getbookkeeper().count("str_join", self) s_item = s_list.listdef.read_item() if s_None.contains(s_item): - if isinstance(str, SomeUnicodeString): + if isinstance(self, SomeUnicodeString): return immutablevalue(u"") return immutablevalue("") - no_nul = str.no_nul and s_item.no_nul - return str.basestringclass(no_nul=no_nul) + no_nul = self.no_nul and s_item.no_nul + return self.basestringclass(no_nul=no_nul) - def iter(str): - return SomeIterator(str) + def iter(self): + return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(str): - return str.basecharclass() + def getanyitem(self): + return self.basecharclass() - def method_split(str, patt, max=-1): - getbookkeeper().count("str_split", str, patt) + def method_split(self, patt, max=-1): + getbookkeeper().count("str_split", self, patt) if max == -1 and patt.is_constant() and patt.const == "\0": no_nul = True else: - no_nul = str.no_nul - s_item = str.basestringclass(no_nul=no_nul) + no_nul = self.no_nul + s_item = self.basestringclass(no_nul=no_nul) return getbookkeeper().newlist(s_item) - def 
method_rsplit(str, patt, max=-1): - getbookkeeper().count("str_rsplit", str, patt) - s_item = str.basestringclass(no_nul=str.no_nul) + def method_rsplit(self, patt, max=-1): + getbookkeeper().count("str_rsplit", self, patt) + s_item = self.basestringclass(no_nul=self.no_nul) return getbookkeeper().newlist(s_item) - def method_replace(str, s1, s2): - return str.basestringclass(no_nul=str.no_nul and s2.no_nul) + def method_replace(self, s1, s2): + return self.basestringclass(no_nul=self.no_nul and s2.no_nul) - def getslice(str, s_start, s_stop): + def getslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - result = str.basestringclass(no_nul=str.no_nul) + result = self.basestringclass(no_nul=self.no_nul) return result - def op_contains(str, s_element): + def op_contains(self, s_element): if s_element.is_constant() and s_element.const == "\0": r = SomeBool() bk = getbookkeeper() - op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=str) + op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) knowntypedata = {} - add_knowntypedata(knowntypedata, False, [op.args[0]], str.nonnulify()) + add_knowntypedata(knowntypedata, False, [op.args[0]], self.nonnulify()) r.set_knowntypedata(knowntypedata) return r else: - return SomeObject.op_contains(str, s_element) + return SomeObject.op_contains(self, s_element) op_contains.can_only_throw = [] def method_format(self, *args): @@ -533,7 +533,7 @@ return SomeByteArray() class __extend__(SomeUnicodeString): - def method_encode(uni, s_enc): + def method_encode(self, s_enc): if not s_enc.is_constant(): raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const @@ -544,29 +544,29 @@ class __extend__(SomeString): - def method_isdigit(str): + def method_isdigit(self): return s_Bool - def method_isalpha(str): + def method_isalpha(self): return s_Bool - def method_isalnum(str): + def method_isalnum(self): return s_Bool - def method_upper(str): + def method_upper(self): 
return SomeString() - def method_lower(str): + def method_lower(self): return SomeString() - def method_splitlines(str, s_keep_newlines=None): - s_list = getbookkeeper().newlist(str.basestringclass()) + def method_splitlines(self, s_keep_newlines=None): + s_list = getbookkeeper().newlist(self.basestringclass()) # Force the list to be resizable because ll_splitlines doesn't # preallocate the list. s_list.listdef.listitem.resize() return s_list - def method_decode(str, s_enc): + def method_decode(self, s_enc): if not s_enc.is_constant(): raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const @@ -577,96 +577,96 @@ class __extend__(SomeChar, SomeUnicodeCodePoint): - def len(chr): + def len(self): return immutablevalue(1) - def ord(str): + def ord(self): return SomeInteger(nonneg=True) class __extend__(SomeChar): - def method_isspace(chr): + def method_isspace(self): return s_Bool - def method_isalnum(chr): + def method_isalnum(self): return s_Bool - def method_islower(chr): + def method_islower(self): return s_Bool - def method_isupper(chr): + def method_isupper(self): return s_Bool - def method_lower(chr): - return chr + def method_lower(self): + return self - def method_upper(chr): - return chr + def method_upper(self): + return self class __extend__(SomeIterator): - def iter(itr): - return itr + def iter(self): + return self iter.can_only_throw = [] - def _can_only_throw(itr): + def _can_only_throw(self): can_throw = [StopIteration] - if isinstance(itr.s_container, SomeDict): + if isinstance(self.s_container, SomeDict): can_throw.append(RuntimeError) return can_throw - def next(itr): - if itr.variant == ("enumerate",): - s_item = itr.s_container.getanyitem() + def next(self): + if self.variant == ("enumerate",): + s_item = self.s_container.getanyitem() return SomeTuple((SomeInteger(nonneg=True), s_item)) - variant = itr.variant + variant = self.variant if variant == ("reversed",): variant = () - return itr.s_container.getanyitem(*variant) + 
return self.s_container.getanyitem(*variant) next.can_only_throw = _can_only_throw method_next = next class __extend__(SomeInstance): - def _true_getattr(ins, attr): + def _true_getattr(self, attr): if attr == '__class__': - return ins.classdef.read_attr__class__() - attrdef = ins.classdef.find_attribute(attr) + return self.classdef.read_attr__class__() + attrdef = self.classdef.find_attribute(attr) position = getbookkeeper().position_key attrdef.read_locations[position] = True s_result = attrdef.getvalue() # hack: if s_result is a set of methods, discard the ones - # that can't possibly apply to an instance of ins.classdef. + # that can't possibly apply to an instance of self.classdef. # XXX do it more nicely if isinstance(s_result, SomePBC): - s_result = ins.classdef.lookup_filter(s_result, attr, - ins.flags) + s_result = self.classdef.lookup_filter(s_result, attr, + self.flags) elif isinstance(s_result, SomeImpossibleValue): - ins.classdef.check_missing_attribute_update(attr) + self.classdef.check_missing_attribute_update(attr) # blocking is harmless if the attribute is explicitly listed # in the class or a parent class. 
- for basedef in ins.classdef.getmro(): + for basedef in self.classdef.getmro(): if basedef.classdesc.all_enforced_attrs is not None: if attr in basedef.classdesc.all_enforced_attrs: raise HarmlesslyBlocked("get enforced attr") elif isinstance(s_result, SomeList): - s_result = ins.classdef.classdesc.maybe_return_immutable_list( + s_result = self.classdef.classdesc.maybe_return_immutable_list( attr, s_result) return s_result - def getattr(ins, s_attr): + def getattr(self, s_attr): if s_attr.is_constant() and isinstance(s_attr.const, str): attr = s_attr.const - return ins._true_getattr(attr) + return self._true_getattr(attr) raise AnnotatorError("A variable argument to getattr is not RPython") getattr.can_only_throw = [] - def setattr(ins, s_attr, s_value): + def setattr(self, s_attr, s_value): if s_attr.is_constant() and isinstance(s_attr.const, str): attr = s_attr.const # find the (possibly parent) class where this attr is defined - clsdef = ins.classdef.locate_attribute(attr) + clsdef = self.classdef.locate_attribute(attr) attrdef = clsdef.attrs[attr] attrdef.modified(clsdef) @@ -676,83 +676,83 @@ # create or update the attribute in clsdef clsdef.generalize_attr(attr, s_value) - def bool_behavior(ins, s): - if not ins.can_be_None: + def bool_behavior(self, s): + if not self.can_be_None: s.const = True - def iter(ins): - s_iterable = ins._true_getattr('__iter__') + def iter(self): + s_iterable = self._true_getattr('__iter__') bk = getbookkeeper() # record for calltables bk.emulate_pbc_call(bk.position_key, s_iterable, []) return s_iterable.call(bk.build_args("simple_call", [])) - def next(ins): - s_next = ins._true_getattr('next') + def next(self): + s_next = self._true_getattr('next') bk = getbookkeeper() # record for calltables bk.emulate_pbc_call(bk.position_key, s_next, []) return s_next.call(bk.build_args("simple_call", [])) class __extend__(SomeBuiltin): - def _can_only_throw(bltn, *args): - analyser_func = getattr(bltn.analyser, 'im_func', None) + def 
_can_only_throw(self, *args): + analyser_func = getattr(self.analyser, 'im_func', None) can_only_throw = getattr(analyser_func, 'can_only_throw', None) if can_only_throw is None or isinstance(can_only_throw, list): return can_only_throw - if bltn.s_self is not None: - return can_only_throw(bltn.s_self, *args) + if self.s_self is not None: + return can_only_throw(self.s_self, *args) else: return can_only_throw(*args) - def simple_call(bltn, *args): - if bltn.s_self is not None: - return bltn.analyser(bltn.s_self, *args) + def simple_call(self, *args): + if self.s_self is not None: + return self.analyser(self.s_self, *args) else: - if bltn.methodname: - getbookkeeper().count(bltn.methodname.replace('.', '_'), *args) - return bltn.analyser(*args) + if self.methodname: + getbookkeeper().count(self.methodname.replace('.', '_'), *args) + return self.analyser(*args) simple_call.can_only_throw = _can_only_throw - def call(bltn, args, implicit_init=False): + def call(self, args, implicit_init=False): args_s, kwds = args.unpack() # prefix keyword arguments with 's_' kwds_s = {} for key, s_value in kwds.items(): kwds_s['s_'+key] = s_value - if bltn.s_self is not None: - return bltn.analyser(bltn.s_self, *args_s, **kwds_s) + if self.s_self is not None: + return self.analyser(self.s_self, *args_s, **kwds_s) else: - return bltn.analyser(*args_s, **kwds_s) + return self.analyser(*args_s, **kwds_s) class __extend__(SomePBC): - def getattr(pbc, s_attr): + def getattr(self, s_attr): bookkeeper = getbookkeeper() - return bookkeeper.pbc_getattr(pbc, s_attr) + return bookkeeper.pbc_getattr(self, s_attr) getattr.can_only_throw = [] - def setattr(pbc, s_attr, s_value): - if not pbc.isNone(): + def setattr(self, s_attr, s_value): + if not self.isNone(): raise AnnotatorError("Cannot modify attribute of a pre-built constant") - def call(pbc, args): + def call(self, args): bookkeeper = getbookkeeper() - return bookkeeper.pbc_call(pbc, args) + return bookkeeper.pbc_call(self, args) - def 
bind_callables_under(pbc, classdef, name): - d = [desc.bind_under(classdef, name) for desc in pbc.descriptions] - return SomePBC(d, can_be_None=pbc.can_be_None) + def bind_callables_under(self, classdef, name): + d = [desc.bind_under(classdef, name) for desc in self.descriptions] + return SomePBC(d, can_be_None=self.can_be_None) - def bool_behavior(pbc, s): - if pbc.isNone(): + def bool_behavior(self, s): + if self.isNone(): s.const = False - elif not pbc.can_be_None: + elif not self.can_be_None: s.const = True - def len(pbc): - if pbc.isNone(): + def len(self): + if self.isNone(): # this None could later be generalized into an empty list, # whose length is the constant 0; so let's tentatively answer 0. return immutablevalue(0) @@ -766,9 +766,9 @@ class __extend__(SomePtr): - def getattr(p, s_attr): - assert s_attr.is_constant(), "getattr on ptr %r with non-constant field-name" % p.ll_ptrtype - example = p.ll_ptrtype._example() + def getattr(self, s_attr): + assert s_attr.is_constant(), "getattr on ptr %r with non-constant field-name" % self.ll_ptrtype + example = self.ll_ptrtype._example() try: v = example._lookup_adtmeth(s_attr.const) except AttributeError: @@ -783,48 +783,48 @@ return getbookkeeper().immutablevalue(v) getattr.can_only_throw = [] - def len(p): - length = p.ll_ptrtype._example()._fixedlength() + def len(self): + length = self.ll_ptrtype._example()._fixedlength() if length is None: - return SomeObject.len(p) + return SomeObject.len(self) else: return immutablevalue(length) - def setattr(p, s_attr, s_value): # just doing checking - assert s_attr.is_constant(), "setattr on ptr %r with non-constant field-name" % p.ll_ptrtype - example = p.ll_ptrtype._example() + def setattr(self, s_attr, s_value): # just doing checking + assert s_attr.is_constant(), "setattr on ptr %r with non-constant field-name" % self.ll_ptrtype + example = self.ll_ptrtype._example() if getattr(example, s_attr.const) is not None: # ignore Void s_value v_lltype = 
annotation_to_lltype(s_value) setattr(example, s_attr.const, v_lltype._defl()) - def call(p, args): + def call(self, args): args_s, kwds_s = args.unpack() if kwds_s: raise Exception("keyword arguments to call to a low-level fn ptr") info = 'argument to ll function pointer call' llargs = [annotation_to_lltype(s_arg,info)._defl() for s_arg in args_s] - v = p.ll_ptrtype._example()(*llargs) + v = self.ll_ptrtype._example()(*llargs) return ll_to_annotation(v) - def bool(p): + def bool(self): return s_Bool class __extend__(SomeLLADTMeth): - def call(adtmeth, args): + def call(self, args): bookkeeper = getbookkeeper() - s_func = bookkeeper.immutablevalue(adtmeth.func) - return s_func.call(args.prepend(lltype_to_annotation(adtmeth.ll_ptrtype))) + s_func = bookkeeper.immutablevalue(self.func) + return s_func.call(args.prepend(lltype_to_annotation(self.ll_ptrtype))) #_________________________________________ # weakrefs class __extend__(SomeWeakRef): - def simple_call(s_wrf): - if s_wrf.classdef is None: + def simple_call(self): + if self.classdef is None: return s_None # known to be a dead weakref else: - return SomeInstance(s_wrf.classdef, can_be_None=True) + return SomeInstance(self.classdef, can_be_None=True) #_________________________________________ # memory addresses @@ -832,7 +832,7 @@ from rpython.rtyper.lltypesystem import llmemory class __extend__(SomeAddress): - def getattr(s_addr, s_attr): + def getattr(self, s_attr): assert s_attr.is_constant() assert isinstance(s_attr, SomeString) assert s_attr.const in llmemory.supported_access_types @@ -840,5 +840,5 @@ llmemory.supported_access_types[s_attr.const]) getattr.can_only_throw = [] - def bool(s_addr): + def bool(self): return s_Bool diff --git a/rpython/flowspace/argument.py b/rpython/flowspace/argument.py --- a/rpython/flowspace/argument.py +++ b/rpython/flowspace/argument.py @@ -1,7 +1,7 @@ """ Arguments objects. 
""" - +from rpython.flowspace.model import const class Signature(object): _immutable_ = True @@ -77,21 +77,37 @@ """Represents the arguments passed into a function call, i.e. the `a, b, *c, **d` part in `return func(a, b, *c, **d)`. """ - def __init__(self, args_w, keywords=None, w_stararg=None, - w_starstararg=None): + def __init__(self, args_w, keywords=None, w_stararg=None): self.w_stararg = w_stararg - assert w_starstararg is None, "No **-unpacking in RPython" assert isinstance(args_w, list) self.arguments_w = args_w self.keywords = keywords or {} + def __repr__(self): + """ NOT_RPYTHON """ + name = self.__class__.__name__ + if not self.keywords: + return '%s(%s)' % (name, self.arguments_w,) + else: + return '%s(%s, %s)' % (name, self.arguments_w, self.keywords) + def flatten(self): """ Argument <-> list of w_objects together with "shape" information """ - shape_cnt = len(self.arguments_w) # Number of positional args - shape_keys = tuple(sorted(self.keywords)) - shape_star = self.w_stararg is not None # Flag: presence of *arg - shape_stst = False # Flag: presence of **kwds + shape_cnt, shape_keys, shape_star = self._rawshape() data_w = self.arguments_w + [self.keywords[key] for key in shape_keys] if shape_star: data_w.append(self.w_stararg) - return (shape_cnt, shape_keys, shape_star, shape_stst), data_w + return (shape_cnt, shape_keys, shape_star), data_w + + def _rawshape(self): + shape_cnt = len(self.arguments_w) + shape_keys = tuple(sorted(self.keywords)) + shape_star = self.w_stararg is not None # Flag: presence of *arg + return shape_cnt, shape_keys, shape_star + + def as_list(self): + assert not self.keywords + if self.w_stararg is None: + return self.arguments_w + else: + return self.arguments_w + [const(x) for x in self.w_stararg.value] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -9,7 +9,7 @@ from rpython.tool.stdlib_opcode import 
host_bytecode_spec from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, - c_last_exception, SpaceOperation, const) + c_last_exception, const, FSException) from rpython.flowspace.framestate import (FrameState, recursively_unflatten, recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, @@ -30,25 +30,6 @@ class StopFlowing(Exception): pass -class Return(Exception): - def __init__(self, value): - self.value = value - -class FSException(Exception): - def __init__(self, w_type, w_value): - assert w_type is not None - self.w_type = w_type - self.w_value = w_value - - def get_w_value(self, _): - return self.w_value - - def __str__(self): - return '[%s: %s]' % (self.w_type, self.w_value) - -class ImplicitOperationError(FSException): - pass - class BytecodeCorruption(Exception): pass From noreply at buildbot.pypy.org Thu Nov 28 17:23:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 28 Nov 2013 17:23:30 +0100 (CET) Subject: [pypy-commit] pypy default: Support the "General unicode category" pattern in rsre. Message-ID: <20131128162330.00A4A1C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68337:fc6598a6d83a Date: 2013-11-28 17:22 +0100 http://bitbucket.org/pypy/pypy/changeset/fc6598a6d83a/ Log: Support the "General unicode category" pattern in rsre. 
diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -145,13 +145,11 @@ result = False while True: opcode = pattern[ppos] - i = 0 - for function in set_dispatch_unroll: - if function is not None and opcode == i: + for i, function in set_dispatch_unroll: + if opcode == i: newresult, ppos = function(pattern, ppos, char_code) result |= newresult break - i = i + 1 else: if opcode == 0: # FAILURE break @@ -225,13 +223,42 @@ index += count * (32 / CODESIZE) # skip blocks return match, index -set_dispatch_table = [ - None, # FAILURE - None, None, None, None, None, None, None, None, - set_category, set_charset, set_bigcharset, None, None, None, - None, None, None, None, set_literal, None, None, None, None, - None, None, - None, # NEGATE - set_range -] -set_dispatch_unroll = unrolling_iterable(set_dispatch_table) +def set_unicode_general_category(pat, index, char_code): + # Unicode "General category property code" (not used by Python). + # A general category is two letters. 'pat[index+1]' contains both + # the first character, and the second character shifted by 8. + # http://en.wikipedia.org/wiki/Unicode_character_property#General_Category + # Also supports single-character categories, if the second character is 0. + # Negative matches are triggered by bit number 7. 
+ assert unicodedb is not None + cat = unicodedb.category(char_code) + category_code = pat[index + 1] + first_character = category_code & 0x7F + second_character = (category_code >> 8) & 0x7F + negative_match = category_code & 0x80 + # + if second_character == 0: + # single-character match + check = ord(cat[0]) + expected = first_character + else: + # two-characters match + check = ord(cat[0]) | (ord(cat[1]) << 8) + expected = first_character | (second_character << 8) + # + if negative_match: + result = check != expected + else: + result = check == expected + # + return result, index + 2 + +set_dispatch_table = { + 9: set_category, + 10: set_charset, + 11: set_bigcharset, + 19: set_literal, + 27: set_range, + 70: set_unicode_general_category, +} +set_dispatch_unroll = unrolling_iterable(sorted(set_dispatch_table.items())) diff --git a/rpython/rlib/rsre/rsre_core.py b/rpython/rlib/rsre/rsre_core.py --- a/rpython/rlib/rsre/rsre_core.py +++ b/rpython/rlib/rsre/rsre_core.py @@ -19,7 +19,7 @@ #OPCODE_CALL = 8 OPCODE_CATEGORY = 9 OPCODE_CHARSET = 10 -#OPCODE_BIGCHARSET = 11 +OPCODE_BIGCHARSET = 11 OPCODE_GROUPREF = 12 OPCODE_GROUPREF_EXISTS = 13 OPCODE_GROUPREF_IGNORE = 14 @@ -41,6 +41,9 @@ #OPCODE_SUBPATTERN = 30 OPCODE_MIN_REPEAT_ONE = 31 +# not used by Python itself +OPCODE_UNICODE_GENERAL_CATEGORY = 70 + # ____________________________________________________________ _seen_specname = {} diff --git a/rpython/rlib/rsre/test/test_char.py b/rpython/rlib/rsre/test/test_char.py --- a/rpython/rlib/rsre/test/test_char.py +++ b/rpython/rlib/rsre/test/test_char.py @@ -126,3 +126,44 @@ assert cat(CHCODES["category_uni_not_digit"], ROMAN_NUMERAL) assert cat(CHCODES["category_uni_not_digit"], CIRCLED_NUMBER) assert cat(CHCODES["category_uni_not_digit"], DINGBAT_CIRCLED) + + +def test_general_category(): + from rpython.rlib.unicodedata import unicodedb + + for cat, positive, negative in [('L', u'aZ\xe9', u'. 
?'), + ('P', u'.?', u'aZ\xe9 ')]: + pat_pos = [70, ord(cat), 0] + pat_neg = [70, ord(cat) | 0x80, 0] + for c in positive: + assert unicodedb.category(ord(c)).startswith(cat) + assert rsre_char.check_charset(pat_pos, 0, ord(c)) + assert not rsre_char.check_charset(pat_neg, 0, ord(c)) + for c in negative: + assert not unicodedb.category(ord(c)).startswith(cat) + assert not rsre_char.check_charset(pat_pos, 0, ord(c)) + assert rsre_char.check_charset(pat_neg, 0, ord(c)) + + def cat2num(cat): + return ord(cat[0]) | (ord(cat[1]) << 8) + + for cat, positive, negative in [('Lu', u'A', u'z\xe9 '), + ('Ll', u'z\xe9', u'A \n')]: + pat_pos = [70, cat2num(cat), 0] + pat_neg = [70, cat2num(cat) | 0x80, 0] + for c in positive: + assert unicodedb.category(ord(c)) == cat + assert rsre_char.check_charset(pat_pos, 0, ord(c)) + assert not rsre_char.check_charset(pat_neg, 0, ord(c)) + for c in negative: + assert unicodedb.category(ord(c)) != cat + assert not rsre_char.check_charset(pat_pos, 0, ord(c)) + assert rsre_char.check_charset(pat_neg, 0, ord(c)) + + # test for how the common 'L&' pattern might be compiled + pat = [70, cat2num('Lu'), 70, cat2num('Ll'), 70, cat2num('Lt'), 0] + assert rsre_char.check_charset(pat, 0, 65) # Lu + assert rsre_char.check_charset(pat, 0, 99) # Ll + assert rsre_char.check_charset(pat, 0, 453) # Lt + assert not rsre_char.check_charset(pat, 0, 688) # Lm + assert not rsre_char.check_charset(pat, 0, 5870) # Nl From noreply at buildbot.pypy.org Fri Nov 29 06:39:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 29 Nov 2013 06:39:40 +0100 (CET) Subject: [pypy-commit] pypy default: test, change the name of the windows import library created during translation, Message-ID: <20131129053940.A28361C01CB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68338:a1989cb701a7 Date: 2013-11-29 07:38 +0200 http://bitbucket.org/pypy/pypy/changeset/a1989cb701a7/ Log: test, change the name of the windows import library created during 
translation, fixes some failing lib-python nightly tests diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -470,7 +470,8 @@ return py.path.local(newexename) def create_exe(self): - """ Copy the compiled executable into translator/goal + """ Copy the compiled executable into current directory, which is + pypy/goal on nightly builds """ if self.exe_name is not None: exename = self.c_entryp @@ -482,8 +483,11 @@ shutil.copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) if sys.platform == 'win32': - shutil.copyfile(str(soname.new(ext='lib')), - str(newsoname.new(ext='lib'))) + # the import library is named python27.lib, according + # to the pragma in pyconfig.h + libname = str(newsoname.dirpath().join('python27.lib')) + 
shutil.copyfile(str(soname.new(ext='lib')), libname) + self.log.info("copied: %s" % (libname,)) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -1,6 +1,7 @@ import py - +import os from rpython.translator.driver import TranslationDriver +from rpython.tool.udir import udir def test_ctr(): td = TranslationDriver() @@ -44,3 +45,33 @@ 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) + + +def test_create_exe(): + if not os.name == 'nt': + py.skip('Windows only test') + + dst_name = udir.join('dst/pypy.exe') + src_name = udir.join('src/dydy2.exe') + dll_name = udir.join('src/pypy.dll') + lib_name = udir.join('src/pypy.lib') + src_name.ensure() + src_name.write('exe') + dll_name.ensure() + dll_name.write('dll') + lib_name.ensure() + lib_name.write('lib') + dst_name.ensure() + + class CBuilder(object): + shared_library_name = dll_name + + td = TranslationDriver(exe_name=str(dst_name)) + td.c_entryp = str(src_name) + td.cbuilder = CBuilder() + td.create_exe() + assert dst_name.read() == 'exe' + assert dst_name.new(ext='dll').read() == 'dll' + assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' + + From noreply at buildbot.pypy.org Fri Nov 29 10:49:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 29 Nov 2013 10:49:57 +0100 (CET) Subject: [pypy-commit] pypy default: Try to report where a sys.exit(1) comes from Message-ID: <20131129094957.139951C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68339:1241202d156d Date: 2013-11-29 10:49 +0100 http://bitbucket.org/pypy/pypy/changeset/1241202d156d/ Log: Try to report where a sys.exit(1) comes from diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ 
b/testrunner/runner.py @@ -159,6 +159,8 @@ else: msg = "Killed by %s." % getsignalname(-exitcode) extralog = "! %s\n %s\n" % (test, msg) + else: + extralog = " somefailed=True in %s\n" % (test,) else: failure = False return failure, extralog @@ -261,7 +263,8 @@ done += 1 failure = failure or somefailed - heading = "__ %s [%d done in total] " % (testname, done) + heading = "__ %s [%d done in total, somefailed=%s] " % ( + testname, done, somefailed) out.write(heading + (79-len(heading))*'_'+'\n') From noreply at buildbot.pypy.org Fri Nov 29 10:56:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 29 Nov 2013 10:56:28 +0100 (CET) Subject: [pypy-commit] pypy default: Test fix Message-ID: <20131129095628.D36F51C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68340:d210e91cf527 Date: 2013-11-29 10:55 +0100 http://bitbucket.org/pypy/pypy/changeset/d210e91cf527/ Log: Test fix diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -160,7 +160,7 @@ msg = "Killed by %s." % getsignalname(-exitcode) extralog = "! %s\n %s\n" % (test, msg) else: - extralog = " somefailed=True in %s\n" % (test,) + extralog = " (somefailed=True in %s)\n" % (test,) else: failure = False return failure, extralog diff --git a/testrunner/test/test_runner.py b/testrunner/test/test_runner.py --- a/testrunner/test/test_runner.py +++ b/testrunner/test/test_runner.py @@ -171,7 +171,7 @@ failure, extralog = runner.interpret_exitcode(1, "test_foo", "F Foo\n") assert failure - assert extralog == "" + assert extralog == " (somefailed=True in test_foo)\n" failure, extralog = runner.interpret_exitcode(2, "test_foo") assert failure From noreply at buildbot.pypy.org Fri Nov 29 11:55:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 29 Nov 2013 11:55:43 +0100 (CET) Subject: [pypy-commit] pypy default: Lower the (virtual) memory requirements for this test. 
I think it used Message-ID: <20131129105543.42C311C01B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68341:b229f0aafb29 Date: 2013-11-29 11:53 +0100 http://bitbucket.org/pypy/pypy/changeset/b229f0aafb29/ Log: Lower the (virtual) memory requirements for this test. I think it used to require more than 3GB, which fails on some 32-bit platforms. diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -74,8 +74,8 @@ h = self.sthread.new(switchbackonce_callback, rffi.cast(llmemory.Address, 321)) # 'h' ignored - if (i % 5000) == 2500: - rgc.collect() + if (i % 2000) == 1000: + rgc.collect() # This should run in < 1.5GB virtual memory def any_alive(self): for task in self.tasks: From noreply at buildbot.pypy.org Fri Nov 29 23:55:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 29 Nov 2013 23:55:31 +0100 (CET) Subject: [pypy-commit] cffi default: Document some limitations of ffi.gc(), particularly on PyPy. Message-ID: <20131129225531.514A71C01B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1433:4480f85a1279 Date: 2013-11-29 23:55 +0100 http://bitbucket.org/cffi/cffi/changeset/4480f85a1279/ Log: Document some limitations of ffi.gc(), particularly on PyPy. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1240,6 +1240,17 @@ object is garbage-collected. *New in version 0.3* (together with the fact that any cdata object can be weakly referenced). +Note that this should be avoided for large memory allocations or +for limited resources. This is particularly true on PyPy: its GC does +not know how much memory or how many resources the returned ``ptr`` +holds. It will only run its GC when enough memory it knows about has +been allocated (and thus run the destructor possibly later than you +would expect). 
Moreover, the destructor is called in whatever thread +PyPy is at that moment, which might be a problem for some C libraries. +In these cases, consider writing a wrapper class with custom ``__enter__()`` +and ``__exit__()`` methods that allocate and free the C data at known +points in time, and using it in a ``with`` statement. + .. "versionadded:: 0.3" --- inlined in the previous paragraph ``ffi.new_handle(python_object)``: return a non-NULL cdata of type From noreply at buildbot.pypy.org Fri Nov 29 23:56:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 29 Nov 2013 23:56:44 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: Document some limitations of ffi.gc(), particularly on PyPy. Message-ID: <20131129225644.A5F491C01B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1434:e11a35d1fa1a Date: 2013-11-29 23:55 +0100 http://bitbucket.org/cffi/cffi/changeset/e11a35d1fa1a/ Log: Document some limitations of ffi.gc(), particularly on PyPy. (transplanted from 4480f85a1279b0f13803821265a096b35b40cefa) diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1240,6 +1240,17 @@ object is garbage-collected. *New in version 0.3* (together with the fact that any cdata object can be weakly referenced). +Note that this should be avoided for large memory allocations or +for limited resources. This is particularly true on PyPy: its GC does +not know how much memory or how many resources the returned ``ptr`` +holds. It will only run its GC when enough memory it knows about has +been allocated (and thus run the destructor possibly later than you +would expect). Moreover, the destructor is called in whatever thread +PyPy is at that moment, which might be a problem for some C libraries. 
+In these cases, consider writing a wrapper class with custom ``__enter__()`` +and ``__exit__()`` methods that allocate and free the C data at known +points in time, and using it in a ``with`` statement. + .. "versionadded:: 0.3" --- inlined in the previous paragraph ``ffi.new_handle(python_object)``: return a non-NULL cdata of type From noreply at buildbot.pypy.org Sat Nov 30 09:50:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Nov 2013 09:50:26 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Sprint announcement Message-ID: <20131130085026.850D41C0205@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5110:3f091dcef9c8 Date: 2013-11-30 09:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/3f091dcef9c8/ Log: Sprint announcement diff --git a/sprintinfo/leysin-winter-2014/announcement.txt b/sprintinfo/leysin-winter-2014/announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/announcement.txt @@ -0,0 +1,62 @@ +===================================================================== + PyPy Leysin Winter Sprint (11-19st January 2014) +===================================================================== + +The next PyPy sprint will be in Leysin, Switzerland, for the +ninth time. This is a fully public sprint: newcomers and topics +other than those proposed below are welcome. + +------------------------------ +Goals and topics of the sprint +------------------------------ + +* Py3k: work towards supporting Python 3 in PyPy + +* NumPyPy: work towards supporting the numpy module in PyPy + +* STM: work towards supporting Software Transactional Memory + +* And as usual, the main side goal is to have fun in winter sports :-) + We can take a day off for ski. + +----------- +Exact times +----------- + +For a change, and as an attempt to simplify things, I specified the +dates as 11-19 January 2014, where 11 and 19 are travel days. We will +work full days between the 12 and the 18. 
You are of course allowed +to show up for a part of that time only, too. + +----------------------- +Location & Accomodation +----------------------- + +Leysin, Switzerland, "same place as before". Let me refresh your +memory: both the sprint venue and the lodging will be in a very spacious +pair of chalets built specifically for bed & breakfast: +http://www.ermina.ch/. The place has a good ADSL Internet connexion +with wireless installed. You can of course arrange your own +lodging anywhere (as long as you are in Leysin, you cannot be more than a +15 minutes walk away from the sprint venue), but I definitely recommend +lodging there too -- you won't find a better view anywhere else (though you +probably won't get much worse ones easily, either :-) + +Please *confirm* that you are coming so that we can adjust the reservations +as appropriate. The rate so far has been around 60 CHF a night all included +in 2-person rooms, with breakfast. There are larger rooms too (less +expensive per person) and maybe the possibility to get a single room if you +really want to. + +Please register by Mercurial:: + + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/leysin-winter-2014 + +or on the pypy-dev mailing list if you do not yet have check-in rights: + + http://mail.python.org/mailman/listinfo/pypy-dev + +You need a Swiss-to-(insert country here) power adapter. There will be +some Swiss-to-EU adapters around -- bring a EU-format power strip if you +have one. diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -0,0 +1,58 @@ + +People coming to the Leysin sprint Winter 2014 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. 
+ + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Armin Rigo private +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Romain Guillebert ? ? +Antonio Cuni ? ? +Michael Foord ? ? +Maciej Fijalkowski ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Carl Friedrich Bolz ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Alexander Schremmer ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Thénault ? ? 
+==================== ============== ===================== From noreply at buildbot.pypy.org Sat Nov 30 09:52:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Nov 2013 09:52:32 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Fill to 72 columns Message-ID: <20131130085232.2907C1C0205@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5111:63b56fcf1203 Date: 2013-11-30 09:52 +0100 http://bitbucket.org/pypy/extradoc/changeset/63b56fcf1203/ Log: Fill to 72 columns diff --git a/sprintinfo/leysin-winter-2014/announcement.txt b/sprintinfo/leysin-winter-2014/announcement.txt --- a/sprintinfo/leysin-winter-2014/announcement.txt +++ b/sprintinfo/leysin-winter-2014/announcement.txt @@ -2,9 +2,9 @@ PyPy Leysin Winter Sprint (11-19st January 2014) ===================================================================== -The next PyPy sprint will be in Leysin, Switzerland, for the -ninth time. This is a fully public sprint: newcomers and topics -other than those proposed below are welcome. +The next PyPy sprint will be in Leysin, Switzerland, for the ninth time. +This is a fully public sprint: newcomers and topics other than those +proposed below are welcome. ------------------------------ Goals and topics of the sprint @@ -25,8 +25,8 @@ For a change, and as an attempt to simplify things, I specified the dates as 11-19 January 2014, where 11 and 19 are travel days. We will -work full days between the 12 and the 18. You are of course allowed -to show up for a part of that time only, too. +work full days between the 12 and the 18. You are of course allowed to +show up for a part of that time only, too. ----------------------- Location & Accomodation @@ -36,17 +36,17 @@ memory: both the sprint venue and the lodging will be in a very spacious pair of chalets built specifically for bed & breakfast: http://www.ermina.ch/. The place has a good ADSL Internet connexion -with wireless installed. 
You can of course arrange your own -lodging anywhere (as long as you are in Leysin, you cannot be more than a -15 minutes walk away from the sprint venue), but I definitely recommend -lodging there too -- you won't find a better view anywhere else (though you -probably won't get much worse ones easily, either :-) +with wireless installed. You can of course arrange your own lodging +anywhere (as long as you are in Leysin, you cannot be more than a 15 +minutes walk away from the sprint venue), but I definitely recommend +lodging there too -- you won't find a better view anywhere else (though +you probably won't get much worse ones easily, either :-) -Please *confirm* that you are coming so that we can adjust the reservations -as appropriate. The rate so far has been around 60 CHF a night all included -in 2-person rooms, with breakfast. There are larger rooms too (less -expensive per person) and maybe the possibility to get a single room if you -really want to. +Please *confirm* that you are coming so that we can adjust the +reservations as appropriate. The rate so far has been around 60 CHF a +night all included in 2-person rooms, with breakfast. There are larger +rooms too (less expensive per person) and maybe the possibility to get a +single room if you really want to. 
Please register by Mercurial:: From noreply at buildbot.pypy.org Sat Nov 30 14:53:35 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 30 Nov 2013 14:53:35 +0100 (CET) Subject: [pypy-commit] pypy default: Move evil conftest import from module- to function-level Message-ID: <20131130135335.0246F1C01B0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68342:6bce07544ff7 Date: 2013-11-30 13:52 +0000 http://bitbucket.org/pypy/pypy/changeset/6bce07544ff7/ Log: Move evil conftest import from module- to function-level diff --git a/rpython/tool/gcc_cache.py b/rpython/tool/gcc_cache.py --- a/rpython/tool/gcc_cache.py +++ b/rpython/tool/gcc_cache.py @@ -1,15 +1,11 @@ -from rpython.translator.platform import CompilationError -from rpython.conftest import cache_dir from hashlib import md5 import py, os -cache_dir_root = py.path.local(cache_dir).ensure(dir=1) - -def cache_file_path(c_files, eci, cachename): +def cache_file_path(c_files, eci, cache_root, cachename): "Builds a filename to cache compilation data" # Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform - cache_dir = cache_dir_root.join(cachename).ensure(dir=1) + cache_dir = cache_root.join(cachename).ensure(dir=1) filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() @@ -19,7 +15,9 @@ "Builds and run a program; caches the result" # Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform - path = cache_file_path(c_files, eci, 'build_executable_cache') + from rpython.conftest import cache_dir + cache_root = py.path.local(cache_dir).ensure(dir=1) + path = cache_file_path(c_files, eci, cache_root, 'build_executable_cache') try: return path.read() except py.error.Error: @@ -56,7 +54,9 @@ "Try to compile a program. If it works, caches this fact." 
# Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform - path = cache_file_path(c_files, eci, 'try_compile_cache') + from rpython.conftest import cache_dir + cache_root = py.path.local(cache_dir).ensure(dir=1) + path = cache_file_path(c_files, eci, cache_root, 'try_compile_cache') try: data = path.read() if data == 'True': diff --git a/rpython/tool/test/test_gcc_cache.py b/rpython/tool/test/test_gcc_cache.py --- a/rpython/tool/test/test_gcc_cache.py +++ b/rpython/tool/test/test_gcc_cache.py @@ -1,11 +1,17 @@ import sys -from rpython.tool.gcc_cache import * +import cStringIO +import py from rpython.tool.udir import udir -import md5, cStringIO from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.translator.platform import CompilationError +from rpython.tool.gcc_cache import ( + cache_file_path, build_executable_cache, try_compile_cache) localudir = udir.join('test_gcc_cache').ensure(dir=1) +from rpython.conftest import cache_dir +cache_root = py.path.local(cache_dir).ensure(dir=1) + def test_gcc_exec(): f = localudir.join("x.c") f.write(""" @@ -23,7 +29,7 @@ dir2.join('test_gcc_exec.h').write('#define ANSWER 42\n') eci = ExternalCompilationInfo(include_dirs=[str(dir1)]) # remove cache - path = cache_file_path([f], eci, 'build_executable_cache') + path = cache_file_path([f], eci, cache_root, 'build_executable_cache') if path.check(): path.remove() res = build_executable_cache([f], eci) @@ -54,7 +60,7 @@ dir2.join('test_gcc_ask.h').write('#error boom\n') eci = ExternalCompilationInfo(include_dirs=[str(dir1)]) # remove cache - path = cache_file_path([f], eci, 'try_compile_cache') + path = cache_file_path([f], eci, cache_root, 'try_compile_cache') if path.check(): path.remove() assert try_compile_cache([f], eci) @@ -90,4 +96,3 @@ finally: sys.stderr = oldstderr assert 'ERROR' not in capture.getvalue().upper() - From noreply at buildbot.pypy.org Sat Nov 30 15:41:24 2013 From: noreply 
at buildbot.pypy.org (rguillebert) Date: Sat, 30 Nov 2013 15:41:24 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add myself Message-ID: <20131130144124.3BF961C0095@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5112:154de39968e1 Date: 2013-11-30 15:40 +0100 http://bitbucket.org/pypy/extradoc/changeset/154de39968e1/ Log: Add myself diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -11,6 +11,7 @@ Name Arrive/Depart Accomodation ==================== ============== ======================= Armin Rigo private +Romain Guillebert 11-19 Ermina ==================== ============== ======================= From noreply at buildbot.pypy.org Sat Nov 30 16:07:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 30 Nov 2013 16:07:18 +0100 (CET) Subject: [pypy-commit] pypy default: stick cache_dir definition in rpython.config.translationoption (as a global) Message-ID: <20131130150718.85CD81C01F4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68343:622d676a349e Date: 2013-11-30 15:06 +0000 http://bitbucket.org/pypy/pypy/changeset/622d676a349e/ Log: stick cache_dir definition in rpython.config.translationoption (as a global) diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,4 +1,5 @@ import sys +import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption from rpython.config.config import ChoiceOption, StrOption, Config from rpython.config.config import ConfigError @@ -20,6 +21,9 @@ IS_64_BITS = sys.maxint > 2147483647 +MAINDIR = os.path.dirname(os.path.dirname(__file__)) +CACHE_DIR = os.path.realpath(os.path.join(MAINDIR, '_cache')) + PLATFORMS = [ 'maemo', 'host', diff --git 
a/rpython/conftest.py b/rpython/conftest.py --- a/rpython/conftest.py +++ b/rpython/conftest.py @@ -5,7 +5,6 @@ pytest_plugins = 'rpython.tool.pytest.expecttest' cdir = realpath(join(dirname(__file__), 'translator', 'c')) -cache_dir = realpath(join(dirname(__file__), '_cache')) option = None def braindead_deindent(self): diff --git a/rpython/tool/gcc_cache.py b/rpython/tool/gcc_cache.py --- a/rpython/tool/gcc_cache.py +++ b/rpython/tool/gcc_cache.py @@ -1,10 +1,12 @@ from hashlib import md5 import py, os -def cache_file_path(c_files, eci, cache_root, cachename): +def cache_file_path(c_files, eci, cachename): "Builds a filename to cache compilation data" # Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform + from rpython.config.translationoption import CACHE_DIR + cache_root = py.path.local(CACHE_DIR).ensure(dir=1) cache_dir = cache_root.join(cachename).ensure(dir=1) filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) @@ -15,9 +17,7 @@ "Builds and run a program; caches the result" # Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform - from rpython.conftest import cache_dir - cache_root = py.path.local(cache_dir).ensure(dir=1) - path = cache_file_path(c_files, eci, cache_root, 'build_executable_cache') + path = cache_file_path(c_files, eci, 'build_executable_cache') try: return path.read() except py.error.Error: @@ -54,9 +54,7 @@ "Try to compile a program. If it works, caches this fact." 
# Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform - from rpython.conftest import cache_dir - cache_root = py.path.local(cache_dir).ensure(dir=1) - path = cache_file_path(c_files, eci, cache_root, 'try_compile_cache') + path = cache_file_path(c_files, eci, 'try_compile_cache') try: data = path.read() if data == 'True': diff --git a/rpython/tool/test/test_gcc_cache.py b/rpython/tool/test/test_gcc_cache.py --- a/rpython/tool/test/test_gcc_cache.py +++ b/rpython/tool/test/test_gcc_cache.py @@ -9,9 +9,6 @@ localudir = udir.join('test_gcc_cache').ensure(dir=1) -from rpython.conftest import cache_dir -cache_root = py.path.local(cache_dir).ensure(dir=1) - def test_gcc_exec(): f = localudir.join("x.c") f.write(""" @@ -29,7 +26,7 @@ dir2.join('test_gcc_exec.h').write('#define ANSWER 42\n') eci = ExternalCompilationInfo(include_dirs=[str(dir1)]) # remove cache - path = cache_file_path([f], eci, cache_root, 'build_executable_cache') + path = cache_file_path([f], eci, 'build_executable_cache') if path.check(): path.remove() res = build_executable_cache([f], eci) @@ -60,7 +57,7 @@ dir2.join('test_gcc_ask.h').write('#error boom\n') eci = ExternalCompilationInfo(include_dirs=[str(dir1)]) # remove cache - path = cache_file_path([f], eci, cache_root, 'try_compile_cache') + path = cache_file_path([f], eci, 'try_compile_cache') if path.check(): path.remove() assert try_compile_cache([f], eci) diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -7,19 +7,17 @@ import os import sys -from rpython.conftest import cache_dir - import py -# clean up early rpython/_cache -try: - py.path.local(cache_dir).remove() -except Exception: - pass - from rpython.config.config import (to_optparse, OptionDescription, BoolOption, ArbitraryOption, StrOption, IntOption, Config, ChoiceOption, OptHelpFormatter) from 
rpython.config.translationoption import (get_combined_translation_config, - set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform) + set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform, CACHE_DIR) + +# clean up early rpython/_cache +try: + py.path.local(CACHE_DIR).remove() +except Exception: + pass GOALS = [ From noreply at buildbot.pypy.org Sat Nov 30 17:30:26 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 30 Nov 2013 17:30:26 +0100 (CET) Subject: [pypy-commit] pypy default: whoops Message-ID: <20131130163026.7983F1C01CB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r68344:c1428cb45c9f Date: 2013-11-29 16:21 +0200 http://bitbucket.org/pypy/pypy/changeset/c1428cb45c9f/ Log: whoops diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -49,7 +49,7 @@ def test_create_exe(): if not os.name == 'nt': - py.skip('Windows only test') + py.test.skip('Windows only test') dst_name = udir.join('dst/pypy.exe') src_name = udir.join('src/dydy2.exe') From noreply at buildbot.pypy.org Sat Nov 30 17:30:27 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 30 Nov 2013 17:30:27 +0100 (CET) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20131130163027.AAA8E1C01CB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r68345:68e498c271ef Date: 2013-11-29 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/68e498c271ef/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -18,3 +18,7 @@ .. branch: voidtype_strformat Better support for record numpy arrays + +.. 
branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + From noreply at buildbot.pypy.org Sat Nov 30 21:53:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Nov 2013 21:53:50 +0100 (CET) Subject: [pypy-commit] cffi default: An extra test Message-ID: <20131130205350.415631C0095@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1435:de7e8aec4001 Date: 2013-11-30 21:53 +0100 http://bitbucket.org/cffi/cffi/changeset/de7e8aec4001/ Log: An extra test diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3136,6 +3136,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Sat Nov 30 21:54:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Nov 2013 21:54:40 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/de7e8aec4001 and fix Message-ID: <20131130205440.3058C1C0095@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68346:cee23d5e33f9 Date: 2013-11-30 21:54 +0100 http://bitbucket.org/pypy/pypy/changeset/cee23d5e33f9/ Log: Update to cffi/de7e8aec4001 and fix diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -424,6 +424,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = 
['w_keepalive'] diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3125,6 +3125,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Sat Nov 30 22:58:06 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 30 Nov 2013 22:58:06 +0100 (CET) Subject: [pypy-commit] pypy default: Use a set here -- not a dict with bogus fields Message-ID: <20131130215806.8A9541C01B0@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68347:c4debf511df5 Date: 2013-11-30 15:55 -0600 http://bitbucket.org/pypy/pypy/changeset/c4debf511df5/ Log: Use a set here -- not a dict with bogus fields diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -135,7 +135,7 @@ self.delayedreprs = {} self.delayedconsts = [] self.delayedfuncs = [] - self.newgraphs = {} + self.newgraphs = set() def getgraph(self, ll_function, args_s, s_result): # get the graph of the mix-level helper ll_function and prepare it for @@ -235,7 +235,7 @@ ann.annotated[graph.returnblock] = graph s_function = bk.immutablevalue(ll_function) bk.emulate_pbc_call(graph, s_function, args_s) - self.newgraphs[graph] = True + self.newgraphs.add(graph) ann.complete_helpers(self.policy) for ll_function, graph, args_s, s_result in self.pending: s_real_result = ann.binding(graph.getreturnvar()) @@ -246,7 +246,7 @@ (graph, s_result, s_real_result)) del self.pending[:] for graph in 
translator.graphs[original_graph_count:]: - self.newgraphs[graph] = True + self.newgraphs.add(graph) def finish_rtype(self): rtyper = self.rtyper @@ -260,7 +260,7 @@ p._become(repr.convert_const(obj)) rtyper.call_all_setups() for p, graph in self.delayedfuncs: - self.newgraphs[graph] = True + self.newgraphs.add(graph) real_p = rtyper.getcallable(graph) REAL = lltype.typeOf(real_p).TO FUNCTYPE = lltype.typeOf(p).TO @@ -273,13 +273,13 @@ del self.delayedconsts[:] del self.delayedfuncs[:] for graph in translator.graphs[original_graph_count:]: - self.newgraphs[graph] = True + self.newgraphs.add(graph) def backend_optimize(self, **flags): # only optimize the newly created graphs from rpython.translator.backendopt.all import backend_optimizations translator = self.rtyper.annotator.translator - newgraphs = self.newgraphs.keys() + newgraphs = list(self.newgraphs) backend_optimizations(translator, newgraphs, secondary=True, inline_graph_from_anywhere=True, **flags) self.newgraphs.clear() From noreply at buildbot.pypy.org Sat Nov 30 22:58:07 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 30 Nov 2013 22:58:07 +0100 (CET) Subject: [pypy-commit] pypy default: Replaced another use of a dict with bogus values with a set Message-ID: <20131130215807.C295D1C01B0@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68348:781d223211cb Date: 2013-11-30 15:57 -0600 http://bitbucket.org/pypy/pypy/changeset/781d223211cb/ Log: Replaced another use of a dict with bogus values with a set diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -132,7 +132,7 @@ self.rtyper = rtyper self.policy = MixLevelAnnotatorPolicy(self) self.pending = [] # list of (ll_function, graph, args_s, s_result) - self.delayedreprs = {} + self.delayedreprs = set() self.delayedconsts = [] self.delayedfuncs = [] self.newgraphs = set() @@ -193,7 +193,7 @@ else: delayed = 
r.set_setup_maybe_delayed() if delayed: - self.delayedreprs[r] = True + self.delayedreprs.add(r) return r def s_r_instanceof(self, cls, can_be_None=True, check_never_seen=True): From noreply at buildbot.pypy.org Sat Nov 30 22:58:09 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 30 Nov 2013 22:58:09 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20131130215809.4F90F1C01B0@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68349:e797f7673b02 Date: 2013-11-30 15:57 -0600 http://bitbucket.org/pypy/pypy/changeset/e797f7673b02/ Log: merged upstream diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -18,3 +18,7 @@ .. branch: voidtype_strformat Better support for record numpy arrays + +.. 
branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -424,6 +424,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3125,6 +3125,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,4 +1,5 @@ import sys +import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption from rpython.config.config import ChoiceOption, StrOption, Config from rpython.config.config import ConfigError @@ -20,6 +21,9 @@ IS_64_BITS = sys.maxint > 2147483647 +MAINDIR = os.path.dirname(os.path.dirname(__file__)) +CACHE_DIR = os.path.realpath(os.path.join(MAINDIR, '_cache')) + PLATFORMS = [ 'maemo', 'host', diff --git a/rpython/conftest.py b/rpython/conftest.py --- a/rpython/conftest.py +++ 
b/rpython/conftest.py @@ -5,7 +5,6 @@ pytest_plugins = 'rpython.tool.pytest.expecttest' cdir = realpath(join(dirname(__file__), 'translator', 'c')) -cache_dir = realpath(join(dirname(__file__), '_cache')) option = None def braindead_deindent(self): diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -74,8 +74,8 @@ h = self.sthread.new(switchbackonce_callback, rffi.cast(llmemory.Address, 321)) # 'h' ignored - if (i % 5000) == 2500: - rgc.collect() + if (i % 2000) == 1000: + rgc.collect() # This should run in < 1.5GB virtual memory def any_alive(self): for task in self.tasks: diff --git a/rpython/tool/gcc_cache.py b/rpython/tool/gcc_cache.py --- a/rpython/tool/gcc_cache.py +++ b/rpython/tool/gcc_cache.py @@ -1,15 +1,13 @@ -from rpython.translator.platform import CompilationError -from rpython.conftest import cache_dir from hashlib import md5 import py, os -cache_dir_root = py.path.local(cache_dir).ensure(dir=1) - def cache_file_path(c_files, eci, cachename): "Builds a filename to cache compilation data" # Import 'platform' every time, the compiler may have been changed from rpython.translator.platform import platform - cache_dir = cache_dir_root.join(cachename).ensure(dir=1) + from rpython.config.translationoption import CACHE_DIR + cache_root = py.path.local(CACHE_DIR).ensure(dir=1) + cache_dir = cache_root.join(cachename).ensure(dir=1) filecontents = [c_file.read() for c_file in c_files] key = repr((filecontents, eci, platform.key())) hash = md5(key).hexdigest() diff --git a/rpython/tool/test/test_gcc_cache.py b/rpython/tool/test/test_gcc_cache.py --- a/rpython/tool/test/test_gcc_cache.py +++ b/rpython/tool/test/test_gcc_cache.py @@ -1,8 +1,11 @@ import sys -from rpython.tool.gcc_cache import * +import cStringIO +import py from rpython.tool.udir import udir -import md5, cStringIO from rpython.translator.tool.cbuild import ExternalCompilationInfo 
+from rpython.translator.platform import CompilationError +from rpython.tool.gcc_cache import ( + cache_file_path, build_executable_cache, try_compile_cache) localudir = udir.join('test_gcc_cache').ensure(dir=1) @@ -90,4 +93,3 @@ finally: sys.stderr = oldstderr assert 'ERROR' not in capture.getvalue().upper() - diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -470,7 +470,8 @@ return py.path.local(newexename) def create_exe(self): - """ Copy the compiled executable into translator/goal + """ Copy the compiled executable into current directory, which is + pypy/goal on nightly builds """ if self.exe_name is not None: exename = self.c_entryp @@ -482,8 +483,11 @@ shutil.copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) if sys.platform == 'win32': - shutil.copyfile(str(soname.new(ext='lib')), - str(newsoname.new(ext='lib'))) + # the import library is named python27.lib, according + # to the pragma in pyconfig.h + libname = str(newsoname.dirpath().join('python27.lib')) + shutil.copyfile(str(soname.new(ext='lib')), libname) + self.log.info("copied: %s" % (libname,)) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -7,19 +7,17 @@ import os import sys -from rpython.conftest import cache_dir - import py -# clean up early rpython/_cache -try: - py.path.local(cache_dir).remove() -except Exception: - pass - from rpython.config.config import (to_optparse, OptionDescription, BoolOption, ArbitraryOption, StrOption, IntOption, Config, ChoiceOption, OptHelpFormatter) from rpython.config.translationoption import (get_combined_translation_config, - set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform) + 
set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform, CACHE_DIR) + +# clean up early rpython/_cache +try: + py.path.local(CACHE_DIR).remove() +except Exception: + pass GOALS = [ diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -1,6 +1,7 @@ import py - +import os from rpython.translator.driver import TranslationDriver +from rpython.tool.udir import udir def test_ctr(): td = TranslationDriver() @@ -44,3 +45,33 @@ 'compile_c', 'pyjitpl'] assert set(td.exposed) == set(expected) + + +def test_create_exe(): + if not os.name == 'nt': + py.test.skip('Windows only test') + + dst_name = udir.join('dst/pypy.exe') + src_name = udir.join('src/dydy2.exe') + dll_name = udir.join('src/pypy.dll') + lib_name = udir.join('src/pypy.lib') + src_name.ensure() + src_name.write('exe') + dll_name.ensure() + dll_name.write('dll') + lib_name.ensure() + lib_name.write('lib') + dst_name.ensure() + + class CBuilder(object): + shared_library_name = dll_name + + td = TranslationDriver(exe_name=str(dst_name)) + td.c_entryp = str(src_name) + td.cbuilder = CBuilder() + td.create_exe() + assert dst_name.read() == 'exe' + assert dst_name.new(ext='dll').read() == 'dll' + assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' + + diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -159,6 +159,8 @@ else: msg = "Killed by %s." % getsignalname(-exitcode) extralog = "! 
%s\n %s\n" % (test, msg) + else: + extralog = " (somefailed=True in %s)\n" % (test,) else: failure = False return failure, extralog @@ -261,7 +263,8 @@ done += 1 failure = failure or somefailed - heading = "__ %s [%d done in total] " % (testname, done) + heading = "__ %s [%d done in total, somefailed=%s] " % ( + testname, done, somefailed) out.write(heading + (79-len(heading))*'_'+'\n') diff --git a/testrunner/test/test_runner.py b/testrunner/test/test_runner.py --- a/testrunner/test/test_runner.py +++ b/testrunner/test/test_runner.py @@ -171,7 +171,7 @@ failure, extralog = runner.interpret_exitcode(1, "test_foo", "F Foo\n") assert failure - assert extralog == "" + assert extralog == " (somefailed=True in test_foo)\n" failure, extralog = runner.interpret_exitcode(2, "test_foo") assert failure